| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) |
|---|---|---|---|---|---|
Renzo-Olivares/android_kernel_htc_m7-gpe | arch/powerpc/platforms/ps3/gelic_udbg.c | 8968 | 6090 | /*
* udbg debug output routine via GELIC UDP broadcasts
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2006, 2007 Sony Corporation
* Copyright (C) 2010 Hector Martin <hector@marcansoft.com>
* Copyright (C) 2011 Andre Heider <a.heider@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*/
#include <asm/io.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#define GELIC_BUS_ID 1
#define GELIC_DEVICE_ID 0
#define GELIC_DEBUG_PORT 18194
#define GELIC_MAX_MESSAGE_SIZE 1000
#define GELIC_LV1_GET_MAC_ADDRESS 1
#define GELIC_LV1_GET_VLAN_ID 4
#define GELIC_LV1_VLAN_TX_ETHERNET_0 2
#define GELIC_DESCR_DMA_STAT_MASK 0xf0000000
#define GELIC_DESCR_DMA_CARDOWNED 0xa0000000
#define GELIC_DESCR_TX_DMA_IKE 0x00080000
#define GELIC_DESCR_TX_DMA_NO_CHKSUM 0x00000000
#define GELIC_DESCR_TX_DMA_FRAME_TAIL 0x00040000
#define GELIC_DESCR_DMA_CMD_NO_CHKSUM (GELIC_DESCR_DMA_CARDOWNED | \
GELIC_DESCR_TX_DMA_IKE | \
GELIC_DESCR_TX_DMA_NO_CHKSUM)
static u64 bus_addr;
struct gelic_descr {
/* as defined by the hardware */
__be32 buf_addr;
__be32 buf_size;
__be32 next_descr_addr;
__be32 dmac_cmd_status;
__be32 result_size;
__be32 valid_size; /* all zeroes for tx */
__be32 data_status;
__be32 data_error; /* all zeroes for tx */
} __attribute__((aligned(32)));
struct debug_block {
struct gelic_descr descr;
u8 pkt[1520];
} __packed;
struct ethhdr {
u8 dest[6];
u8 src[6];
u16 type;
} __packed;
struct vlantag {
u16 vlan;
u16 subtype;
} __packed;
struct iphdr {
u8 ver_len;
u8 dscp_ecn;
u16 total_length;
u16 ident;
u16 frag_off_flags;
u8 ttl;
u8 proto;
u16 checksum;
u32 src;
u32 dest;
} __packed;
struct udphdr {
u16 src;
u16 dest;
u16 len;
u16 checksum;
} __packed;
static __iomem struct ethhdr *h_eth;
static __iomem struct vlantag *h_vlan;
static __iomem struct iphdr *h_ip;
static __iomem struct udphdr *h_udp;
static __iomem char *pmsg;
static __iomem char *pmsgc;
static __iomem struct debug_block dbg __attribute__((aligned(32)));
static int header_size;
static void map_dma_mem(int bus_id, int dev_id, void *start, size_t len,
u64 *real_bus_addr)
{
s64 result;
u64 real_addr = ((u64)start) & 0x0fffffffffffffffUL;
u64 real_end = real_addr + len;
u64 map_start = real_addr & ~0xfff;
u64 map_end = (real_end + 0xfff) & ~0xfff;
u64 bus_addr = 0;
u64 flags = 0xf800000000000000UL;
result = lv1_allocate_device_dma_region(bus_id, dev_id,
map_end - map_start, 12, 0,
&bus_addr);
if (result)
lv1_panic(0);
result = lv1_map_device_dma_region(bus_id, dev_id, map_start,
bus_addr, map_end - map_start,
flags);
if (result)
lv1_panic(0);
*real_bus_addr = bus_addr + real_addr - map_start;
}
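/*
 * Illustrative sketch, not part of the original file: the arithmetic above
 * rounds the buffer start down and its end up to 4 KiB boundaries
 * (consistent with the 12 passed to lv1_allocate_device_dma_region), then
 * re-applies the in-page offset to the bus address the hypervisor returned.
 * A minimal stand-alone demonstration of that arithmetic, guarded out of
 * the build:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long start = 0x12345678ULL;	/* buffer address */
	unsigned long long len = 0x100ULL;		/* buffer length */
	unsigned long long map_start = start & ~0xfffULL;		 /* 0x12345000 */
	unsigned long long map_end = (start + len + 0xfff) & ~0xfffULL; /* 0x12346000 */
	unsigned long long bus = 0x80000000ULL;		/* returned by the hypervisor */

	printf("map %#llx..%#llx, buffer bus address %#llx\n",
	       map_start, map_end, bus + (start - map_start));
	return 0;
}
#endif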
static int unmap_dma_mem(int bus_id, int dev_id, u64 bus_addr, size_t len)
{
s64 result;
u64 real_bus_addr;
real_bus_addr = bus_addr & ~0xfff;
len += bus_addr - real_bus_addr;
len = (len + 0xfff) & ~0xfff;
result = lv1_unmap_device_dma_region(bus_id, dev_id, real_bus_addr,
len);
if (result)
return result;
return lv1_free_device_dma_region(bus_id, dev_id, real_bus_addr);
}
static void gelic_debug_init(void)
{
s64 result;
u64 v2;
u64 mac;
u64 vlan_id;
result = lv1_open_device(GELIC_BUS_ID, GELIC_DEVICE_ID, 0);
if (result)
lv1_panic(0);
map_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID, &dbg, sizeof(dbg),
&bus_addr);
memset(&dbg, 0, sizeof(dbg));
dbg.descr.buf_addr = bus_addr + offsetof(struct debug_block, pkt);
wmb();
result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
GELIC_LV1_GET_MAC_ADDRESS, 0, 0, 0,
&mac, &v2);
if (result)
lv1_panic(0);
mac <<= 16;
h_eth = (struct ethhdr *)dbg.pkt;
memset(&h_eth->dest, 0xff, 6);
memcpy(&h_eth->src, &mac, 6);
header_size = sizeof(struct ethhdr);
result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
GELIC_LV1_GET_VLAN_ID,
GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0,
&vlan_id, &v2);
if (!result) {
h_eth->type = 0x8100;
header_size += sizeof(struct vlantag);
h_vlan = (struct vlantag *)(h_eth + 1);
h_vlan->vlan = vlan_id;
h_vlan->subtype = 0x0800;
h_ip = (struct iphdr *)(h_vlan + 1);
} else {
h_eth->type = 0x0800;
h_ip = (struct iphdr *)(h_eth + 1);
}
header_size += sizeof(struct iphdr);
h_ip->ver_len = 0x45;
h_ip->ttl = 10;
h_ip->proto = 0x11;
h_ip->src = 0x00000000;
h_ip->dest = 0xffffffff;
header_size += sizeof(struct udphdr);
h_udp = (struct udphdr *)(h_ip + 1);
h_udp->src = GELIC_DEBUG_PORT;
h_udp->dest = GELIC_DEBUG_PORT;
pmsgc = pmsg = (char *)(h_udp + 1);
}
static void gelic_debug_shutdown(void)
{
if (bus_addr)
unmap_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID,
bus_addr, sizeof(dbg));
lv1_close_device(GELIC_BUS_ID, GELIC_DEVICE_ID);
}
static void gelic_sendbuf(int msgsize)
{
u16 *p;
u32 sum;
int i;
dbg.descr.buf_size = header_size + msgsize;
h_ip->total_length = msgsize + sizeof(struct udphdr) +
sizeof(struct iphdr);
h_udp->len = msgsize + sizeof(struct udphdr);
h_ip->checksum = 0;
sum = 0;
p = (u16 *)h_ip;
for (i = 0; i < 5; i++)
sum += *p++;
h_ip->checksum = ~(sum + (sum >> 16));
dbg.descr.dmac_cmd_status = GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL;
dbg.descr.result_size = 0;
dbg.descr.data_status = 0;
wmb();
lv1_net_start_tx_dma(GELIC_BUS_ID, GELIC_DEVICE_ID, bus_addr, 0);
while ((dbg.descr.dmac_cmd_status & GELIC_DESCR_DMA_STAT_MASK) ==
GELIC_DESCR_DMA_CARDOWNED)
cpu_relax();
}
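/*
 * Illustrative sketch, not part of the original file: the IP header checksum
 * is the ones'-complement sum of the header's 16-bit words with any carries
 * folded back in.  A generic, textbook (RFC 1071 style) helper for that
 * algorithm -- written independently of the open-coded loop above -- is
 * sketched here, guarded out of the build:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint16_t ip_checksum(const uint16_t *p, size_t words)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < words; i++)
		sum += p[i];
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif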
static void ps3gelic_udbg_putc(char ch)
{
*pmsgc++ = ch;
if (ch == '\n' || (pmsgc-pmsg) >= GELIC_MAX_MESSAGE_SIZE) {
gelic_sendbuf(pmsgc-pmsg);
pmsgc = pmsg;
}
}
void __init udbg_init_ps3gelic(void)
{
gelic_debug_init();
udbg_putc = ps3gelic_udbg_putc;
}
void udbg_shutdown_ps3gelic(void)
{
udbg_putc = NULL;
gelic_debug_shutdown();
}
EXPORT_SYMBOL(udbg_shutdown_ps3gelic);
| gpl-2.0 |
wenfengliaoshuzhai/linux | drivers/xen/xen-pciback/conf_space_capability.c | 10248 | 4676 | /*
* PCI Backend - Handles the virtual fields found on the capability lists
* in the configuration space.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
static LIST_HEAD(capabilities);
struct xen_pcibk_config_capability {
struct list_head cap_list;
int capability;
/* If the device has the capability found above, add these fields */
const struct config_field *fields;
};
static const struct config_field caplist_header[] = {
{
.offset = PCI_CAP_LIST_ID,
.size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = NULL,
},
{}
};
static inline void register_capability(struct xen_pcibk_config_capability *cap)
{
list_add_tail(&cap->cap_list, &capabilities);
}
int xen_pcibk_config_capability_add_fields(struct pci_dev *dev)
{
int err = 0;
struct xen_pcibk_config_capability *cap;
int cap_offset;
list_for_each_entry(cap, &capabilities, cap_list) {
cap_offset = pci_find_capability(dev, cap->capability);
if (cap_offset) {
dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
cap->capability, cap_offset);
err = xen_pcibk_config_add_fields_offset(dev,
caplist_header,
cap_offset);
if (err)
goto out;
err = xen_pcibk_config_add_fields_offset(dev,
cap->fields,
cap_offset);
if (err)
goto out;
}
}
out:
return err;
}
static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
void *data)
{
/* Disallow writes to the vital product data */
if (value & PCI_VPD_ADDR_F)
return PCIBIOS_SET_FAILED;
else
return pci_write_config_word(dev, offset, value);
}
static const struct config_field caplist_vpd[] = {
{
.offset = PCI_VPD_ADDR,
.size = 2,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = vpd_address_write,
},
{
.offset = PCI_VPD_DATA,
.size = 4,
.u.dw.read = xen_pcibk_read_config_dword,
.u.dw.write = NULL,
},
{}
};
static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
void *data)
{
int err;
u16 real_value;
err = pci_read_config_word(dev, offset, &real_value);
if (err)
goto out;
*value = real_value & ~PCI_PM_CAP_PME_MASK;
out:
return err;
}
/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
* Can't allow driver domain to enable PMEs - they're shared */
#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
void *data)
{
int err;
u16 old_value;
pci_power_t new_state, old_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
if ((old_value & PM_OK_BITS) != new_value) {
new_value = (old_value & ~PM_OK_BITS) | new_value;
err = pci_write_config_word(dev, offset, new_value);
if (err)
goto out;
}
/* Let pci core handle the power management change */
dev_dbg(&dev->dev, "set power state to %x\n", new_state);
err = pci_set_power_state(dev, new_state);
if (err) {
err = PCIBIOS_SET_FAILED;
goto out;
}
out:
return err;
}
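/*
 * Illustrative sketch, not part of the original file: the merge above keeps
 * every bit outside PM_OK_BITS at its current hardware value and takes only
 * the PM_OK_BITS portion from the driver domain.  Worked example with
 * made-up numbers (ok_bits is a placeholder, not the real PM_OK_BITS),
 * guarded out of the build:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int ok_bits = 0x9f00;		/* placeholder writable-bit mask */
	unsigned int old_value = 0x0103;	/* current hardware register */
	unsigned int guest = 0xffff;		/* value the driver domain wrote */
	unsigned int new_value = guest & ok_bits;

	if ((old_value & ok_bits) != new_value)
		new_value = (old_value & ~ok_bits) | new_value;

	printf("value actually written: %#x\n", new_value);	/* 0x9f03 */
	return 0;
}
#endif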
/* Ensure PMEs are disabled */
static void *pm_ctrl_init(struct pci_dev *dev, int offset)
{
int err;
u16 value;
err = pci_read_config_word(dev, offset, &value);
if (err)
goto out;
if (value & PCI_PM_CTRL_PME_ENABLE) {
value &= ~PCI_PM_CTRL_PME_ENABLE;
err = pci_write_config_word(dev, offset, value);
}
out:
return ERR_PTR(err);
}
static const struct config_field caplist_pm[] = {
{
.offset = PCI_PM_PMC,
.size = 2,
.u.w.read = pm_caps_read,
},
{
.offset = PCI_PM_CTRL,
.size = 2,
.init = pm_ctrl_init,
.u.w.read = xen_pcibk_read_config_word,
.u.w.write = pm_ctrl_write,
},
{
.offset = PCI_PM_PPB_EXTENSIONS,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{
.offset = PCI_PM_DATA_REGISTER,
.size = 1,
.u.b.read = xen_pcibk_read_config_byte,
},
{}
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
.capability = PCI_CAP_ID_PM,
.fields = caplist_pm,
};
static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
.capability = PCI_CAP_ID_VPD,
.fields = caplist_vpd,
};
int xen_pcibk_config_capability_init(void)
{
register_capability(&xen_pcibk_config_capability_vpd);
register_capability(&xen_pcibk_config_capability_pm);
return 0;
}
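/*
 * Illustrative sketch, not part of the original file: wiring up one more
 * capability follows the same pattern as the VPD and PM entries above --
 * describe the virtualized registers in a config_field array, wrap it in a
 * xen_pcibk_config_capability and register it at init time.  Hypothetical
 * example (the capability choice and offsets are made up), guarded out of
 * the build:
 */
#if 0
static const struct config_field caplist_example[] = {
	{
		.offset = 2,			/* hypothetical 16-bit register */
		.size = 2,
		.u.w.read = xen_pcibk_read_config_word,
		.u.w.write = NULL,		/* read-only for the guest */
	},
	{}
};

static struct xen_pcibk_config_capability xen_pcibk_config_capability_example = {
	.capability = PCI_CAP_ID_VNDR,		/* any list capability ID would do */
	.fields = caplist_example,
};

/* ...and in xen_pcibk_config_capability_init():
 *	register_capability(&xen_pcibk_config_capability_example);
 */
#endif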
| gpl-2.0 |
mekinik232/ambipi | xbmc/utils/log.cpp | 9 | 6532 | /*
* Copyright (C) 2005-2012 Team XBMC
* http://www.xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "system.h"
#include "log.h"
#include "stdio_utf8.h"
#include "stat_utf8.h"
#include "threads/CriticalSection.h"
#include "threads/SingleLock.h"
#include "threads/Thread.h"
#include "utils/StdString.h"
#if defined(TARGET_ANDROID)
#include "android/activity/XBMCApp.h"
#elif defined(TARGET_WINDOWS)
#include "win32/WIN32Util.h"
#endif
#define critSec XBMC_GLOBAL_USE(CLog::CLogGlobals).critSec
#define m_file XBMC_GLOBAL_USE(CLog::CLogGlobals).m_file
#define m_repeatCount XBMC_GLOBAL_USE(CLog::CLogGlobals).m_repeatCount
#define m_repeatLogLevel XBMC_GLOBAL_USE(CLog::CLogGlobals).m_repeatLogLevel
#define m_repeatLine XBMC_GLOBAL_USE(CLog::CLogGlobals).m_repeatLine
#define m_logLevel XBMC_GLOBAL_USE(CLog::CLogGlobals).m_logLevel
static char levelNames[][8] =
{"DEBUG", "INFO", "NOTICE", "WARNING", "ERROR", "SEVERE", "FATAL", "NONE"};
CLog::CLog()
{}
CLog::~CLog()
{}
void CLog::Close()
{
CSingleLock waitLock(critSec);
if (m_file)
{
fclose(m_file);
m_file = NULL;
}
m_repeatLine.clear();
}
void CLog::Log(int loglevel, const char *format, ... )
{
static const char* prefixFormat = "%02.2d:%02.2d:%02.2d T:%"PRIu64" %7s: ";
CSingleLock waitLock(critSec);
#if !(defined(_DEBUG) || defined(PROFILE))
if (m_logLevel > LOG_LEVEL_NORMAL ||
(m_logLevel > LOG_LEVEL_NONE && loglevel >= LOGNOTICE))
#endif
{
if (!m_file)
return;
SYSTEMTIME time;
GetLocalTime(&time);
CStdString strPrefix, strData;
strData.reserve(16384);
va_list va;
va_start(va, format);
strData.FormatV(format,va);
va_end(va);
if (m_repeatLogLevel == loglevel && m_repeatLine == strData)
{
m_repeatCount++;
return;
}
else if (m_repeatCount)
{
CStdString strData2;
strPrefix.Format(prefixFormat, time.wHour, time.wMinute, time.wSecond, (uint64_t)CThread::GetCurrentThreadId(), levelNames[m_repeatLogLevel]);
strData2.Format("Previous line repeats %d times." LINE_ENDING, m_repeatCount);
fputs(strPrefix.c_str(), m_file);
fputs(strData2.c_str(), m_file);
OutputDebugString(strData2);
m_repeatCount = 0;
}
m_repeatLine = strData;
m_repeatLogLevel = loglevel;
unsigned int length = 0;
while ( length != strData.length() )
{
length = strData.length();
strData.TrimRight(" ");
strData.TrimRight('\n');
strData.TrimRight("\r");
}
if (!length)
return;
OutputDebugString(strData);
/* fixup newline alignment, number of spaces should equal prefix length */
strData.Replace("\n", LINE_ENDING" ");
strData += LINE_ENDING;
strPrefix.Format(prefixFormat, time.wHour, time.wMinute, time.wSecond, (uint64_t)CThread::GetCurrentThreadId(), levelNames[loglevel]);
//print to adb
#if defined(TARGET_ANDROID) && defined(_DEBUG)
CXBMCApp::android_printf("%s%s",strPrefix.c_str(), strData.c_str());
#endif
fputs(strPrefix.c_str(), m_file);
fputs(strData.c_str(), m_file);
fflush(m_file);
}
}
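/*
 * Illustrative sketch, not part of the original file: the block above
 * collapses identical consecutive messages -- the first occurrence is
 * written, repeats only bump a counter, and a "Previous line repeats N
 * times." summary is emitted once a different message (or level) arrives.
 * A minimal stand-alone version of that idea, guarded out of the build:
 */
#if 0
#include <stdio.h>
#include <string.h>

static char last[256];
static int lastLevel = -1;
static int repeats;

static void log_line(int level, const char *msg)
{
	if (level == lastLevel && strcmp(msg, last) == 0)
	{
		repeats++;		/* swallow the duplicate */
		return;
	}
	if (repeats)
		printf("Previous line repeats %d times.\n", repeats);
	repeats = 0;
	lastLevel = level;
	strncpy(last, msg, sizeof(last) - 1);
	last[sizeof(last) - 1] = '\0';
	printf("%7s: %s\n", level ? "ERROR" : "DEBUG", msg);
}
#endif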
bool CLog::Init(const char* path)
{
CSingleLock waitLock(critSec);
if (!m_file)
{
// g_settings.m_logFolder is initialized in the CSettings constructor
// and changed in CApplication::Create()
CStdString strLogFile, strLogFileOld;
strLogFile.Format("%sxbmc.log", path);
strLogFileOld.Format("%sxbmc.old.log", path);
#if defined(TARGET_WINDOWS)
// the appdata folder might be redirected to an unc share
// convert smb to unc path that stat and fopen can handle it
strLogFile = CWIN32Util::SmbToUnc(strLogFile);
strLogFileOld = CWIN32Util::SmbToUnc(strLogFileOld);
#endif
struct stat64 info;
if (stat64_utf8(strLogFileOld.c_str(),&info) == 0 &&
remove_utf8(strLogFileOld.c_str()) != 0)
return false;
if (stat64_utf8(strLogFile.c_str(),&info) == 0 &&
rename_utf8(strLogFile.c_str(),strLogFileOld.c_str()) != 0)
return false;
m_file = fopen64_utf8(strLogFile.c_str(),"wb");
}
if (m_file)
{
unsigned char BOM[3] = {0xEF, 0xBB, 0xBF};
fwrite(BOM, sizeof(BOM), 1, m_file);
}
return m_file != NULL;
}
void CLog::MemDump(char *pData, int length)
{
Log(LOGDEBUG, "MEM_DUMP: Dumping from %p", pData);
for (int i = 0; i < length; i+=16)
{
CStdString strLine;
strLine.Format("MEM_DUMP: %04x ", i);
char *alpha = pData;
for (int k=0; k < 4 && i + 4*k < length; k++)
{
for (int j=0; j < 4 && i + 4*k + j < length; j++)
{
CStdString strFormat;
strFormat.Format(" %02x", (unsigned char)*pData++);
strLine += strFormat;
}
strLine += " ";
}
// pad with spaces
while (strLine.size() < 13*4 + 16)
strLine += " ";
for (int j=0; j < 16 && i + j < length; j++)
{
if (*alpha > 31)
strLine += *alpha;
else
strLine += '.';
alpha++;
}
Log(LOGDEBUG, "%s", strLine.c_str());
}
}
void CLog::SetLogLevel(int level)
{
CSingleLock waitLock(critSec);
m_logLevel = level;
CLog::Log(LOGNOTICE, "Log level changed to %d", m_logLevel);
}
int CLog::GetLogLevel()
{
return m_logLevel;
}
void CLog::OutputDebugString(const std::string& line)
{
#if defined(_DEBUG) || defined(PROFILE)
#if defined(TARGET_WINDOWS)
// we can't use charsetconverter here as it's initialized later than CLog and deinitialized early
int bufSize = MultiByteToWideChar(CP_UTF8, 0, line.c_str(), -1, NULL, 0);
CStdStringW wstr (L"", bufSize);
if ( MultiByteToWideChar(CP_UTF8, 0, line.c_str(), -1, wstr.GetBuf(bufSize), bufSize) == bufSize )
{
wstr.RelBuf();
::OutputDebugStringW(wstr.c_str());
}
else
#endif // TARGET_WINDOWS
::OutputDebugString(line.c_str());
::OutputDebugString("\n");
#endif
}
| gpl-2.0 |
haroruhomer/rainmeter | Plugins/PluginPerfMon/ObjInst.cpp | 9 | 3463 | //====================================
// File: OBJINST.CPP
// Author: Matt Pietrek
// From: Microsoft Systems Journal
// "Under the Hood", April 1996
//====================================
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <winperf.h>
#include <stdlib.h>
#pragma hdrstop
#include "titledb.h"
#include "objinst.h"
#include "perfcntr.h"
#include "makeptr.h"
CPerfObjectInstance::CPerfObjectInstance(
PPERF_INSTANCE_DEFINITION const pPerfInstDef,
PPERF_COUNTER_DEFINITION const pPerfCntrDef,
DWORD nCounters, CPerfTitleDatabase * const pPerfCounterTitles,
BOOL fDummy)
{
m_pPerfInstDef = pPerfInstDef;
m_pPerfCntrDef = pPerfCntrDef;
m_nCounters = nCounters;
m_pPerfCounterTitles = pPerfCounterTitles;
m_fDummy = fDummy;
}
BOOL
CPerfObjectInstance::GetObjectInstanceName(
PTSTR pszObjInstName, DWORD nSize )
{
if ( m_fDummy )
{
*pszObjInstName = 0; // Return an empty string
return FALSE;
}
if ( nSize < (m_pPerfInstDef->NameLength / sizeof(TCHAR)) )
return FALSE;
PWSTR pszName = MakePtr(PWSTR, m_pPerfInstDef, m_pPerfInstDef->NameOffset);
#ifdef UNICODE
lstrcpy( pszObjInstName, pszName );
#else
wcstombs( pszObjInstName, pszName, nSize );
#endif
return TRUE;
}
CPerfCounter *
CPerfObjectInstance::MakeCounter( PPERF_COUNTER_DEFINITION const pCounterDef )
{
// Look up the name of this counter in the title database
PTSTR pszName = m_pPerfCounterTitles->GetTitleStringFromIndex(
pCounterDef->CounterNameTitleIndex );
DWORD nInstanceDefSize = m_fDummy ? 0 : m_pPerfInstDef->ByteLength;
// Create a new CPerfCounter. The caller is responsible for deleting it.
return new CPerfCounter(pszName,
pCounterDef->CounterType,
MakePtr( PBYTE, m_pPerfInstDef,
nInstanceDefSize +
pCounterDef->CounterOffset ),
pCounterDef->CounterSize );
}
CPerfCounter *
CPerfObjectInstance::GetCounterByIndex( DWORD index )
{
PPERF_COUNTER_DEFINITION pCurrentCounter;
if ( index >= m_nCounters )
return 0;
pCurrentCounter = m_pPerfCntrDef;
// Find the correct PERF_COUNTER_DEFINITION by looping
for ( DWORD i = 0; i < index; i++ )
{
pCurrentCounter = MakePtr( PPERF_COUNTER_DEFINITION,
pCurrentCounter,
pCurrentCounter->ByteLength );
}
if ( pCurrentCounter->ByteLength == 0 )
return 0;
return MakeCounter( pCurrentCounter );
}
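/*
 * Illustrative sketch, not part of the original file: PERF_COUNTER_DEFINITION
 * records are packed back to back, each starting with its own ByteLength, so
 * "next record" is simply "current pointer plus ByteLength" -- which is what
 * the MakePtr loop above does.  A generic stand-alone version of that walk,
 * guarded out of the build:
 */
#if 0
struct record {
	unsigned int byteLength;	/* followed by byteLength-4 payload bytes */
};

static const struct record *next_record(const struct record *r)
{
	return (const struct record *)((const unsigned char *)r + r->byteLength);
}

static const struct record *nth_record(const struct record *first, unsigned int n)
{
	const struct record *r = first;
	while (n-- && r->byteLength)
		r = next_record(r);
	return r->byteLength ? r : 0;	/* zero length terminates the list */
}
#endif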
CPerfCounter *
CPerfObjectInstance::GetFirstCounter( void )
{
m_currentCounter = 0;
return GetCounterByIndex( m_currentCounter );
}
CPerfCounter *
CPerfObjectInstance::GetNextCounter( void )
{
m_currentCounter++;
return GetCounterByIndex( m_currentCounter );
}
CPerfCounter *
CPerfObjectInstance::GetCounterByName( PCTSTR const pszName )
{
DWORD cntrIdx = m_pPerfCounterTitles->GetIndexFromTitleString(pszName);
if ( cntrIdx == 0 )
return 0;
PPERF_COUNTER_DEFINITION pCurrentCounter = m_pPerfCntrDef;
// Find the correct PERF_COUNTER_DEFINITION by looping and comparing
for ( DWORD i = 0; i < m_nCounters; i++ )
{
if ( pCurrentCounter->CounterNameTitleIndex == cntrIdx )
return MakeCounter( pCurrentCounter );
// Nope. Not this one. Advance to the next counter
pCurrentCounter = MakePtr( PPERF_COUNTER_DEFINITION,
pCurrentCounter,
pCurrentCounter->ByteLength );
}
return 0;
}
| gpl-2.0 |
TelekomCloud/libvirt | tests/qemumonitortestutils.c | 9 | 24862 | /*
* Copyright (C) 2011-2013 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "qemumonitortestutils.h"
#include "virthread.h"
#include "qemu/qemu_processpriv.h"
#include "qemu/qemu_monitor.h"
#include "qemu/qemu_agent.h"
#include "rpc/virnetsocket.h"
#include "viralloc.h"
#include "virlog.h"
#include "virerror.h"
#include "virstring.h"
#define VIR_FROM_THIS VIR_FROM_NONE
struct _qemuMonitorTestItem {
qemuMonitorTestResponseCallback cb;
void *opaque;
virFreeCallback freecb;
};
struct _qemuMonitorTest {
virMutex lock;
virThread thread;
bool json;
bool quit;
bool running;
bool started;
char *incoming;
size_t incomingLength;
size_t incomingCapacity;
char *outgoing;
size_t outgoingLength;
size_t outgoingCapacity;
virNetSocketPtr server;
virNetSocketPtr client;
qemuMonitorPtr mon;
qemuAgentPtr agent;
char *tmpdir;
size_t nitems;
qemuMonitorTestItemPtr *items;
virDomainObjPtr vm;
};
static void
qemuMonitorTestItemFree(qemuMonitorTestItemPtr item)
{
if (!item)
return;
if (item->freecb)
(item->freecb)(item->opaque);
VIR_FREE(item);
}
/*
* Appends data for a reply to the outgoing buffer
*/
int
qemuMonitorTestAddReponse(qemuMonitorTestPtr test,
const char *response)
{
size_t want = strlen(response) + 2;
size_t have = test->outgoingCapacity - test->outgoingLength;
VIR_DEBUG("Adding response to monitor command: '%s", response);
if (have < want) {
size_t need = want - have;
if (VIR_EXPAND_N(test->outgoing, test->outgoingCapacity, need) < 0)
return -1;
}
want -= 2;
memcpy(test->outgoing + test->outgoingLength, response, want);
memcpy(test->outgoing + test->outgoingLength + want, "\r\n", 2);
test->outgoingLength += want + 2;
return 0;
}
int
qemuMonitorTestAddUnexpectedErrorResponse(qemuMonitorTestPtr test)
{
if (test->agent || test->json) {
return qemuMonitorTestAddReponse(test,
"{ \"error\": "
" { \"desc\": \"Unexpected command\", "
" \"class\": \"UnexpectedCommand\" } }");
} else {
return qemuMonitorTestAddReponse(test, "unexpected command");
}
}
int ATTRIBUTE_FMT_PRINTF(2, 3)
qemuMonitorReportError(qemuMonitorTestPtr test, const char *errmsg, ...)
{
va_list msgargs;
char *msg = NULL;
char *jsonmsg = NULL;
int ret = -1;
va_start(msgargs, errmsg);
if (virVasprintf(&msg, errmsg, msgargs) < 0)
goto cleanup;
if (test->agent || test->json) {
if (virAsprintf(&jsonmsg, "{ \"error\": "
" { \"desc\": \"%s\", "
" \"class\": \"UnexpectedCommand\" } }",
msg) < 0)
goto cleanup;
} else {
if (virAsprintf(&jsonmsg, "error: '%s'", msg) < 0)
goto cleanup;
}
ret = qemuMonitorTestAddReponse(test, jsonmsg);
cleanup:
va_end(msgargs);
VIR_FREE(msg);
VIR_FREE(jsonmsg);
return ret;
}
static int
qemuMonitorTestProcessCommand(qemuMonitorTestPtr test,
const char *cmdstr)
{
int ret;
VIR_DEBUG("Processing string from monitor handler: '%s", cmdstr);
if (test->nitems == 0) {
return qemuMonitorTestAddUnexpectedErrorResponse(test);
} else {
qemuMonitorTestItemPtr item = test->items[0];
ret = (item->cb)(test, item, cmdstr);
qemuMonitorTestItemFree(item);
if (VIR_DELETE_ELEMENT(test->items, 0, test->nitems) < 0)
return -1;
}
return ret;
}
/*
* Handles read/write of monitor data on the monitor server side
*/
static void
qemuMonitorTestIO(virNetSocketPtr sock,
int events,
void *opaque)
{
qemuMonitorTestPtr test = opaque;
bool err = false;
virMutexLock(&test->lock);
if (test->quit) {
virMutexUnlock(&test->lock);
return;
}
if (events & VIR_EVENT_HANDLE_WRITABLE) {
ssize_t ret;
if ((ret = virNetSocketWrite(sock,
test->outgoing,
test->outgoingLength)) < 0) {
err = true;
goto cleanup;
}
memmove(test->outgoing,
test->outgoing + ret,
test->outgoingLength - ret);
test->outgoingLength -= ret;
if ((test->outgoingCapacity - test->outgoingLength) > 1024)
VIR_SHRINK_N(test->outgoing, test->outgoingCapacity, 1024);
}
if (events & VIR_EVENT_HANDLE_READABLE) {
ssize_t ret, used;
char *t1, *t2;
if ((test->incomingCapacity - test->incomingLength) < 1024) {
if (VIR_EXPAND_N(test->incoming, test->incomingCapacity, 1024) < 0) {
err = true;
goto cleanup;
}
}
if ((ret = virNetSocketRead(sock,
test->incoming + test->incomingLength,
(test->incomingCapacity - test->incomingLength) - 1)) < 0) {
err = true;
goto cleanup;
}
test->incomingLength += ret;
test->incoming[test->incomingLength] = '\0';
/* Look to see if we've got a complete line, and
* if so, handle that command
*/
t1 = test->incoming;
while ((t2 = strstr(t1, "\n"))) {
*t2 = '\0';
if (qemuMonitorTestProcessCommand(test, t1) < 0) {
err = true;
goto cleanup;
}
t1 = t2 + 1;
}
used = t1 - test->incoming;
memmove(test->incoming, t1, test->incomingLength - used);
test->incomingLength -= used;
if ((test->incomingCapacity - test->incomingLength) > 1024) {
VIR_SHRINK_N(test->incoming,
test->incomingCapacity,
1024);
}
}
if (events & (VIR_EVENT_HANDLE_HANGUP |
VIR_EVENT_HANDLE_ERROR))
err = true;
cleanup:
if (err) {
virNetSocketRemoveIOCallback(sock);
virNetSocketClose(sock);
virObjectUnref(test->client);
test->client = NULL;
} else {
events = VIR_EVENT_HANDLE_READABLE;
if (test->outgoingLength)
events |= VIR_EVENT_HANDLE_WRITABLE;
virNetSocketUpdateIOCallback(sock, events);
}
virMutexUnlock(&test->lock);
}
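/*
 * Illustrative sketch, not part of the original file: the read path above
 * accumulates bytes, NUL-terminates the buffer, peels off every complete
 * '\n'-terminated command, and memmove()s any partial trailing command back
 * to the front for the next read.  A minimal stand-alone version of that
 * pattern, guarded out of the build:
 */
#if 0
#include <stdio.h>
#include <string.h>

/* Consume every complete '\n'-terminated command in buf (of length *len);
 * the caller must leave room for the terminating NUL. */
static void consume_lines(char *buf, size_t *len)
{
	char *start = buf, *nl;

	buf[*len] = '\0';
	while ((nl = strchr(start, '\n'))) {
		*nl = '\0';
		printf("command: '%s'\n", start);
		start = nl + 1;
	}
	memmove(buf, start, *len - (start - buf));
	*len -= start - buf;
}
#endif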
static void
qemuMonitorTestWorker(void *opaque)
{
qemuMonitorTestPtr test = opaque;
virMutexLock(&test->lock);
while (!test->quit) {
virMutexUnlock(&test->lock);
if (virEventRunDefaultImpl() < 0) {
virMutexLock(&test->lock);
test->quit = true;
break;
}
virMutexLock(&test->lock);
}
test->running = false;
virMutexUnlock(&test->lock);
return;
}
static void
qemuMonitorTestFreeTimer(int timer ATTRIBUTE_UNUSED,
void *opaque ATTRIBUTE_UNUSED)
{
/* nothing to be done here */
}
void
qemuMonitorTestFree(qemuMonitorTestPtr test)
{
size_t i;
int timer = -1;
if (!test)
return;
virMutexLock(&test->lock);
if (test->running) {
test->quit = true;
/* HACK: Add a dummy timeout to break event loop */
timer = virEventAddTimeout(0, qemuMonitorTestFreeTimer, NULL, NULL);
}
virMutexUnlock(&test->lock);
if (test->client) {
virNetSocketRemoveIOCallback(test->client);
virNetSocketClose(test->client);
virObjectUnref(test->client);
}
virObjectUnref(test->server);
if (test->mon) {
virObjectUnlock(test->mon);
qemuMonitorClose(test->mon);
}
if (test->agent) {
virObjectUnlock(test->agent);
qemuAgentClose(test->agent);
}
virObjectUnref(test->vm);
if (test->started)
virThreadJoin(&test->thread);
if (timer != -1)
virEventRemoveTimeout(timer);
VIR_FREE(test->incoming);
VIR_FREE(test->outgoing);
for (i = 0; i < test->nitems; i++)
qemuMonitorTestItemFree(test->items[i]);
VIR_FREE(test->items);
if (test->tmpdir && rmdir(test->tmpdir) < 0)
VIR_WARN("Failed to remove tempdir: %s", strerror(errno));
VIR_FREE(test->tmpdir);
virMutexDestroy(&test->lock);
VIR_FREE(test);
}
int
qemuMonitorTestAddHandler(qemuMonitorTestPtr test,
qemuMonitorTestResponseCallback cb,
void *opaque,
virFreeCallback freecb)
{
qemuMonitorTestItemPtr item;
if (VIR_ALLOC(item) < 0)
goto error;
item->cb = cb;
item->freecb = freecb;
item->opaque = opaque;
virMutexLock(&test->lock);
if (VIR_APPEND_ELEMENT(test->items, test->nitems, item) < 0) {
virMutexUnlock(&test->lock);
goto error;
}
virMutexUnlock(&test->lock);
return 0;
error:
if (freecb)
(freecb)(opaque);
VIR_FREE(item);
return -1;
}
void *
qemuMonitorTestItemGetPrivateData(qemuMonitorTestItemPtr item)
{
return item ? item->opaque : NULL;
}
typedef struct _qemuMonitorTestCommandArgs qemuMonitorTestCommandArgs;
typedef qemuMonitorTestCommandArgs *qemuMonitorTestCommandArgsPtr;
struct _qemuMonitorTestCommandArgs {
char *argname;
char *argval;
};
struct qemuMonitorTestHandlerData {
char *command_name;
char *response;
size_t nargs;
qemuMonitorTestCommandArgsPtr args;
};
static void
qemuMonitorTestHandlerDataFree(void *opaque)
{
struct qemuMonitorTestHandlerData *data = opaque;
size_t i;
if (!data)
return;
for (i = 0; i < data->nargs; i++) {
VIR_FREE(data->args[i].argname);
VIR_FREE(data->args[i].argval);
}
VIR_FREE(data->command_name);
VIR_FREE(data->response);
VIR_FREE(data->args);
VIR_FREE(data);
}
static int
qemuMonitorTestProcessCommandDefault(qemuMonitorTestPtr test,
qemuMonitorTestItemPtr item,
const char *cmdstr)
{
struct qemuMonitorTestHandlerData *data = item->opaque;
virJSONValuePtr val = NULL;
char *cmdcopy = NULL;
const char *cmdname;
char *tmp;
int ret = -1;
if (test->agent || test->json) {
if (!(val = virJSONValueFromString(cmdstr)))
return -1;
if (!(cmdname = virJSONValueObjectGetString(val, "execute"))) {
ret = qemuMonitorReportError(test, "Missing command name in %s", cmdstr);
goto cleanup;
}
} else {
if (VIR_STRDUP(cmdcopy, cmdstr) < 0)
return -1;
cmdname = cmdcopy;
if (!(tmp = strchr(cmdcopy, ' '))) {
ret = qemuMonitorReportError(test,
"Cannot find command name in '%s'",
cmdstr);
goto cleanup;
}
*tmp = '\0';
}
if (data->command_name && STRNEQ(data->command_name, cmdname))
ret = qemuMonitorTestAddUnexpectedErrorResponse(test);
else
ret = qemuMonitorTestAddReponse(test, data->response);
cleanup:
VIR_FREE(cmdcopy);
virJSONValueFree(val);
return ret;
}
int
qemuMonitorTestAddItem(qemuMonitorTestPtr test,
const char *command_name,
const char *response)
{
struct qemuMonitorTestHandlerData *data;
if (VIR_ALLOC(data) < 0)
return -1;
if (VIR_STRDUP(data->command_name, command_name) < 0 ||
VIR_STRDUP(data->response, response) < 0) {
qemuMonitorTestHandlerDataFree(data);
return -1;
}
return qemuMonitorTestAddHandler(test,
qemuMonitorTestProcessCommandDefault,
data, qemuMonitorTestHandlerDataFree);
}
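/*
 * Illustrative usage sketch, not part of the original file: a test typically
 * queues one canned reply per command it expects, then drives the monitor
 * object returned by qemuMonitorTestGetMonitor() through the normal
 * qemuMonitor* APIs.  Hedged example (error handling and the actual monitor
 * call under test are elided), guarded out of the build:
 */
#if 0
static int
exampleTest(virDomainXMLOptionPtr xmlopt)
{
	qemuMonitorTestPtr test;

	if (!(test = qemuMonitorTestNew(true, xmlopt, NULL, NULL, NULL)))
		return -1;

	if (qemuMonitorTestAddItem(test, "query-status",
				   "{\"return\": {\"running\": true}}") < 0) {
		qemuMonitorTestFree(test);
		return -1;
	}

	/* ... exercise the qemuMonitor API under test against
	 *     qemuMonitorTestGetMonitor(test) here ... */

	qemuMonitorTestFree(test);
	return 0;
}
#endif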
static int
qemuMonitorTestProcessGuestAgentSync(qemuMonitorTestPtr test,
qemuMonitorTestItemPtr item ATTRIBUTE_UNUSED,
const char *cmdstr)
{
virJSONValuePtr val = NULL;
virJSONValuePtr args;
unsigned long long id;
const char *cmdname;
char *retmsg = NULL;
int ret = -1;
if (!(val = virJSONValueFromString(cmdstr)))
return -1;
if (!(cmdname = virJSONValueObjectGetString(val, "execute"))) {
ret = qemuMonitorReportError(test, "Missing guest-sync command name");
goto cleanup;
}
if (STRNEQ(cmdname, "guest-sync")) {
ret = qemuMonitorTestAddUnexpectedErrorResponse(test);
goto cleanup;
}
if (!(args = virJSONValueObjectGet(val, "arguments"))) {
ret = qemuMonitorReportError(test, "Missing arguments for guest-sync");
goto cleanup;
}
if (virJSONValueObjectGetNumberUlong(args, "id", &id)) {
ret = qemuMonitorReportError(test, "Missing id for guest sync");
goto cleanup;
}
if (virAsprintf(&retmsg, "{\"return\":%llu}", id) < 0)
goto cleanup;
ret = qemuMonitorTestAddReponse(test, retmsg);
cleanup:
virJSONValueFree(val);
VIR_FREE(retmsg);
return ret;
}
int
qemuMonitorTestAddAgentSyncResponse(qemuMonitorTestPtr test)
{
if (!test->agent) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
"This test is not an agent test");
return -1;
}
return qemuMonitorTestAddHandler(test,
qemuMonitorTestProcessGuestAgentSync,
NULL, NULL);
}
static int
qemuMonitorTestProcessCommandWithArgs(qemuMonitorTestPtr test,
qemuMonitorTestItemPtr item,
const char *cmdstr)
{
struct qemuMonitorTestHandlerData *data = item->opaque;
virJSONValuePtr val = NULL;
virJSONValuePtr args;
virJSONValuePtr argobj;
char *argstr = NULL;
const char *cmdname;
size_t i;
int ret = -1;
if (!(val = virJSONValueFromString(cmdstr)))
return -1;
if (!(cmdname = virJSONValueObjectGetString(val, "execute"))) {
ret = qemuMonitorReportError(test, "Missing command name in %s", cmdstr);
goto cleanup;
}
if (data->command_name &&
STRNEQ(data->command_name, cmdname)) {
ret = qemuMonitorTestAddUnexpectedErrorResponse(test);
goto cleanup;
}
if (!(args = virJSONValueObjectGet(val, "arguments"))) {
ret = qemuMonitorReportError(test,
"Missing arguments section for command '%s'",
NULLSTR(data->command_name));
goto cleanup;
}
/* validate the args */
for (i = 0; i < data->nargs; i++) {
qemuMonitorTestCommandArgsPtr arg = &data->args[i];
if (!(argobj = virJSONValueObjectGet(args, arg->argname))) {
ret = qemuMonitorReportError(test,
"Missing argument '%s' for command '%s'",
arg->argname,
NULLSTR(data->command_name));
goto cleanup;
}
/* convert the argument to string */
if (!(argstr = virJSONValueToString(argobj, false)))
goto cleanup;
/* verify that the argument value is expected */
if (STRNEQ(argstr, arg->argval)) {
ret = qemuMonitorReportError(test,
"Invalid value of argument '%s' "
"of command '%s': "
"expected '%s' got '%s'",
arg->argname,
NULLSTR(data->command_name),
arg->argval, argstr);
goto cleanup;
}
VIR_FREE(argstr);
}
/* arguments checked out, return the response */
ret = qemuMonitorTestAddReponse(test, data->response);
cleanup:
VIR_FREE(argstr);
virJSONValueFree(val);
return ret;
}
/* this allows adding a responder that can check
 * a (shallow) structure of arguments for a command */
int
qemuMonitorTestAddItemParams(qemuMonitorTestPtr test,
const char *cmdname,
const char *response,
...)
{
struct qemuMonitorTestHandlerData *data;
const char *argname;
const char *argval;
va_list args;
va_start(args, response);
if (VIR_ALLOC(data) < 0)
goto error;
if (VIR_STRDUP(data->command_name, cmdname) < 0 ||
VIR_STRDUP(data->response, response) < 0)
goto error;
while ((argname = va_arg(args, char *))) {
size_t i;
if (!(argval = va_arg(args, char *))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"Missing argument value for argument '%s'",
argname);
goto error;
}
i = data->nargs;
if (VIR_EXPAND_N(data->args, data->nargs, 1))
goto error;
if (VIR_STRDUP(data->args[i].argname, argname) < 0 ||
VIR_STRDUP(data->args[i].argval, argval) < 0)
goto error;
}
va_end(args);
return qemuMonitorTestAddHandler(test,
qemuMonitorTestProcessCommandWithArgs,
data, qemuMonitorTestHandlerDataFree);
error:
va_end(args);
qemuMonitorTestHandlerDataFree(data);
return -1;
}
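/*
 * Illustrative usage sketch, not part of the original file: the trailing
 * arguments are a NULL-terminated list of name/value string pairs, and each
 * value is compared against the JSON-serialized form of the incoming
 * argument (so string values keep their quotes).  Hypothetical call -- the
 * command and argument names are only examples -- guarded out of the build:
 */
#if 0
	/* inside a test set-up function: */
	if (qemuMonitorTestAddItemParams(test, "block_resize",
					 "{\"return\": {}}",
					 "device", "\"drive-virtio-disk0\"",
					 "size", "1024",
					 NULL) < 0)
		goto cleanup;
#endif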
static void
qemuMonitorTestEOFNotify(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
virDomainObjPtr vm ATTRIBUTE_UNUSED,
void *opaque ATTRIBUTE_UNUSED)
{
}
static void
qemuMonitorTestErrorNotify(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
virDomainObjPtr vm ATTRIBUTE_UNUSED,
void *opaque ATTRIBUTE_UNUSED)
{
}
static qemuMonitorCallbacks qemuMonitorTestCallbacks = {
.eofNotify = qemuMonitorTestEOFNotify,
.errorNotify = qemuMonitorTestErrorNotify,
.domainDeviceDeleted = qemuProcessHandleDeviceDeleted,
};
static void
qemuMonitorTestAgentNotify(qemuAgentPtr agent ATTRIBUTE_UNUSED,
virDomainObjPtr vm ATTRIBUTE_UNUSED)
{
}
static qemuAgentCallbacks qemuMonitorTestAgentCallbacks = {
.eofNotify = qemuMonitorTestAgentNotify,
.errorNotify = qemuMonitorTestAgentNotify,
};
static qemuMonitorTestPtr
qemuMonitorCommonTestNew(virDomainXMLOptionPtr xmlopt,
virDomainObjPtr vm,
virDomainChrSourceDefPtr src)
{
qemuMonitorTestPtr test = NULL;
char *path = NULL;
char *tmpdir_template = NULL;
if (VIR_ALLOC(test) < 0)
goto error;
if (virMutexInit(&test->lock) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
"Cannot initialize mutex");
VIR_FREE(test);
return NULL;
}
if (VIR_STRDUP(tmpdir_template, "/tmp/libvirt_XXXXXX") < 0)
goto error;
if (!(test->tmpdir = mkdtemp(tmpdir_template))) {
virReportSystemError(errno, "%s",
"Failed to create temporary directory");
goto error;
}
tmpdir_template = NULL;
if (virAsprintf(&path, "%s/qemumonitorjsontest.sock", test->tmpdir) < 0)
goto error;
if (vm) {
virObjectRef(vm);
test->vm = vm;
} else {
test->vm = virDomainObjNew(xmlopt);
if (!test->vm)
goto error;
}
if (virNetSocketNewListenUNIX(path, 0700, geteuid(), getegid(),
&test->server) < 0)
goto error;
memset(src, 0, sizeof(*src));
src->type = VIR_DOMAIN_CHR_TYPE_UNIX;
src->data.nix.path = (char *)path;
src->data.nix.listen = false;
path = NULL;
if (virNetSocketListen(test->server, 1) < 0)
goto error;
cleanup:
return test;
error:
VIR_FREE(path);
VIR_FREE(tmpdir_template);
qemuMonitorTestFree(test);
test = NULL;
goto cleanup;
}
static int
qemuMonitorCommonTestInit(qemuMonitorTestPtr test)
{
int events = VIR_EVENT_HANDLE_READABLE;
if (!test)
return -1;
if (virNetSocketAccept(test->server, &test->client) < 0)
goto error;
if (!test->client)
goto error;
if (test->outgoingLength > 0)
events = VIR_EVENT_HANDLE_WRITABLE;
if (virNetSocketAddIOCallback(test->client,
events,
qemuMonitorTestIO,
test,
NULL) < 0)
goto error;
virMutexLock(&test->lock);
if (virThreadCreate(&test->thread,
true,
qemuMonitorTestWorker,
test) < 0) {
virMutexUnlock(&test->lock);
goto error;
}
test->started = test->running = true;
virMutexUnlock(&test->lock);
return 0;
error:
return -1;
}
#define QEMU_JSON_GREETING "{\"QMP\":"\
" {\"version\":"\
" {\"qemu\":"\
" {\"micro\": 1,"\
" \"minor\": 0,"\
" \"major\": 1"\
" },"\
" \"package\": \"(qemu-kvm-1.0.1)"\
" \"},"\
" \"capabilities\": []"\
" }"\
"}"
/* We skip the normal handshake reply of "{\"execute\":\"qmp_capabilities\"}" */
#define QEMU_TEXT_GREETING "QEMU 1.0,1 monitor - type 'help' for more information"
qemuMonitorTestPtr
qemuMonitorTestNew(bool json,
virDomainXMLOptionPtr xmlopt,
virDomainObjPtr vm,
virQEMUDriverPtr driver,
const char *greeting)
{
qemuMonitorTestPtr test = NULL;
virDomainChrSourceDef src;
memset(&src, 0, sizeof(src));
if (!(test = qemuMonitorCommonTestNew(xmlopt, vm, &src)))
goto error;
test->json = json;
if (!(test->mon = qemuMonitorOpen(test->vm,
&src,
json,
&qemuMonitorTestCallbacks,
driver)))
goto error;
virObjectLock(test->mon);
if (!greeting)
greeting = json ? QEMU_JSON_GREETING : QEMU_TEXT_GREETING;
if (qemuMonitorTestAddReponse(test, greeting) < 0)
goto error;
if (qemuMonitorCommonTestInit(test) < 0)
goto error;
virDomainChrSourceDefClear(&src);
return test;
error:
virDomainChrSourceDefClear(&src);
qemuMonitorTestFree(test);
return NULL;
}
qemuMonitorTestPtr
qemuMonitorTestNewAgent(virDomainXMLOptionPtr xmlopt)
{
qemuMonitorTestPtr test = NULL;
virDomainChrSourceDef src;
memset(&src, 0, sizeof(src));
if (!(test = qemuMonitorCommonTestNew(xmlopt, NULL, &src)))
goto error;
if (!(test->agent = qemuAgentOpen(test->vm,
&src,
&qemuMonitorTestAgentCallbacks)))
goto error;
virObjectLock(test->agent);
if (qemuMonitorCommonTestInit(test) < 0)
goto error;
virDomainChrSourceDefClear(&src);
return test;
error:
virDomainChrSourceDefClear(&src);
qemuMonitorTestFree(test);
return NULL;
}
qemuMonitorPtr
qemuMonitorTestGetMonitor(qemuMonitorTestPtr test)
{
return test->mon;
}
qemuAgentPtr
qemuMonitorTestGetAgent(qemuMonitorTestPtr test)
{
return test->agent;
}
| gpl-2.0 |
tectronics/filezilla-osp | src/putty/unix/uxproxy.c | 9 | 7322 | /*
* uxproxy.c: Unix implementation of platform_new_connection(),
* supporting an OpenSSH-like proxy command.
*/
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#define DEFINE_PLUG_METHOD_MACROS
#include "tree234.h"
#include "putty.h"
#include "network.h"
#include "proxy.h"
typedef struct Socket_localproxy_tag * Local_Proxy_Socket;
struct Socket_localproxy_tag {
const struct socket_function_table *fn;
/* the above variable absolutely *must* be the first in this structure */
int to_cmd, from_cmd; /* fds */
char *error;
Plug plug;
bufchain pending_output_data;
bufchain pending_input_data;
void *privptr;
};
static int localproxy_select_result(int fd, int event);
/*
* Trees to look up the pipe fds in.
*/
static tree234 *localproxy_by_fromfd, *localproxy_by_tofd;
static int localproxy_fromfd_cmp(void *av, void *bv)
{
Local_Proxy_Socket a = (Local_Proxy_Socket)av;
Local_Proxy_Socket b = (Local_Proxy_Socket)bv;
if (a->from_cmd < b->from_cmd)
return -1;
if (a->from_cmd > b->from_cmd)
return +1;
return 0;
}
static int localproxy_fromfd_find(void *av, void *bv)
{
int a = *(int *)av;
Local_Proxy_Socket b = (Local_Proxy_Socket)bv;
if (a < b->from_cmd)
return -1;
if (a > b->from_cmd)
return +1;
return 0;
}
static int localproxy_tofd_cmp(void *av, void *bv)
{
Local_Proxy_Socket a = (Local_Proxy_Socket)av;
Local_Proxy_Socket b = (Local_Proxy_Socket)bv;
if (a->to_cmd < b->to_cmd)
return -1;
if (a->to_cmd > b->to_cmd)
return +1;
return 0;
}
static int localproxy_tofd_find(void *av, void *bv)
{
int a = *(int *)av;
Local_Proxy_Socket b = (Local_Proxy_Socket)bv;
if (a < b->to_cmd)
return -1;
if (a > b->to_cmd)
return +1;
return 0;
}
/* basic proxy socket functions */
static Plug sk_localproxy_plug (Socket s, Plug p)
{
Local_Proxy_Socket ps = (Local_Proxy_Socket) s;
Plug ret = ps->plug;
if (p)
ps->plug = p;
return ret;
}
static void sk_localproxy_close (Socket s)
{
Local_Proxy_Socket ps = (Local_Proxy_Socket) s;
del234(localproxy_by_fromfd, ps);
del234(localproxy_by_tofd, ps);
uxsel_del(ps->to_cmd);
uxsel_del(ps->from_cmd);
close(ps->to_cmd);
close(ps->from_cmd);
sfree(ps);
}
static int localproxy_try_send(Local_Proxy_Socket ps)
{
int sent = 0;
while (bufchain_size(&ps->pending_output_data) > 0) {
void *data;
int len, ret;
bufchain_prefix(&ps->pending_output_data, &data, &len);
ret = write(ps->to_cmd, data, len);
if (ret < 0 && errno != EWOULDBLOCK) {
/* We're inside the Unix frontend here, so we know
* that the frontend handle is unnecessary. */
logevent(NULL, strerror(errno));
fatalbox("%s", strerror(errno));
} else if (ret <= 0) {
break;
} else {
bufchain_consume(&ps->pending_output_data, ret);
sent += ret;
}
}
if (bufchain_size(&ps->pending_output_data) == 0)
uxsel_del(ps->to_cmd);
else
uxsel_set(ps->to_cmd, 2, localproxy_select_result);
return sent;
}
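/*
 * Illustrative sketch, not part of the original file: the loop above is the
 * usual non-blocking drain pattern -- write as much of the queued data as
 * the pipe will take, treat EWOULDBLOCK as "stop for now", and keep the fd
 * on the writability watch list only while something is still queued.  A
 * stand-alone version of the same idea, guarded out of the build:
 */
#if 0
#include <errno.h>
#include <unistd.h>

static ssize_t drain(int fd, const char *data, size_t len)
{
	size_t sent = 0;

	while (sent < len) {
		ssize_t ret = write(fd, data + sent, len - sent);
		if (ret > 0) {
			sent += ret;
			continue;
		}
		if (ret < 0 && (errno == EWOULDBLOCK || errno == EAGAIN))
			break;		/* pipe full: retry when writable */
		if (ret < 0)
			return -1;	/* real error */
		break;			/* ret == 0: nothing accepted */
	}
	return sent;			/* caller drops this many queued bytes */
}
#endif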
static int sk_localproxy_write (Socket s, const char *data, int len)
{
Local_Proxy_Socket ps = (Local_Proxy_Socket) s;
bufchain_add(&ps->pending_output_data, data, len);
localproxy_try_send(ps);
return bufchain_size(&ps->pending_output_data);
}
static int sk_localproxy_write_oob (Socket s, const char *data, int len)
{
/*
* oob data is treated as inband; nasty, but nothing really
* better we can do
*/
return sk_localproxy_write(s, data, len);
}
static void sk_localproxy_flush (Socket s)
{
/* Local_Proxy_Socket ps = (Local_Proxy_Socket) s; */
/* do nothing */
}
static void sk_localproxy_set_private_ptr (Socket s, void *ptr)
{
Local_Proxy_Socket ps = (Local_Proxy_Socket) s;
ps->privptr = ptr;
}
static void * sk_localproxy_get_private_ptr (Socket s)
{
Local_Proxy_Socket ps = (Local_Proxy_Socket) s;
return ps->privptr;
}
static void sk_localproxy_set_frozen (Socket s, int is_frozen)
{
Local_Proxy_Socket ps = (Local_Proxy_Socket) s;
if (is_frozen)
uxsel_del(ps->from_cmd);
else
uxsel_set(ps->from_cmd, 1, localproxy_select_result);
}
static const char * sk_localproxy_socket_error (Socket s)
{
Local_Proxy_Socket ps = (Local_Proxy_Socket) s;
return ps->error;
}
static int localproxy_select_result(int fd, int event)
{
Local_Proxy_Socket s;
char buf[20480];
int ret;
if (!(s = find234(localproxy_by_fromfd, &fd, localproxy_fromfd_find)) &&
!(s = find234(localproxy_by_tofd, &fd, localproxy_tofd_find)) )
return 1; /* boggle */
if (event == 1) {
assert(fd == s->from_cmd);
ret = read(fd, buf, sizeof(buf));
if (ret < 0) {
return plug_closing(s->plug, strerror(errno), errno, 0);
} else if (ret == 0) {
return plug_closing(s->plug, NULL, 0, 0);
} else {
return plug_receive(s->plug, 0, buf, ret);
}
} else if (event == 2) {
assert(fd == s->to_cmd);
if (localproxy_try_send(s))
plug_sent(s->plug, bufchain_size(&s->pending_output_data));
return 1;
}
return 1;
}
Socket platform_new_connection(SockAddr addr, char *hostname,
int port, int privport,
int oobinline, int nodelay, int keepalive,
Plug plug, const Config *cfg)
{
char *cmd;
static const struct socket_function_table socket_fn_table = {
sk_localproxy_plug,
sk_localproxy_close,
sk_localproxy_write,
sk_localproxy_write_oob,
sk_localproxy_flush,
sk_localproxy_set_private_ptr,
sk_localproxy_get_private_ptr,
sk_localproxy_set_frozen,
sk_localproxy_socket_error
};
Local_Proxy_Socket ret;
int to_cmd_pipe[2], from_cmd_pipe[2], pid;
if (cfg->proxy_type != PROXY_CMD)
return NULL;
cmd = format_telnet_command(addr, port, cfg);
ret = snew(struct Socket_localproxy_tag);
ret->fn = &socket_fn_table;
ret->plug = plug;
ret->error = NULL;
bufchain_init(&ret->pending_input_data);
bufchain_init(&ret->pending_output_data);
/*
* Create the pipes to the proxy command, and spawn the proxy
* command process.
*/
if (pipe(to_cmd_pipe) < 0 ||
pipe(from_cmd_pipe) < 0) {
ret->error = dupprintf("pipe: %s", strerror(errno));
return (Socket)ret;
}
cloexec(to_cmd_pipe[1]);
cloexec(from_cmd_pipe[0]);
pid = fork();
if (pid < 0) {
ret->error = dupprintf("fork: %s", strerror(errno));
return (Socket)ret;
} else if (pid == 0) {
close(0);
close(1);
dup2(to_cmd_pipe[0], 0);
dup2(from_cmd_pipe[1], 1);
close(to_cmd_pipe[0]);
close(from_cmd_pipe[1]);
fcntl(0, F_SETFD, 0);
fcntl(1, F_SETFD, 0);
execl("/bin/sh", "sh", "-c", cmd, (void *)NULL);
_exit(255);
}
sfree(cmd);
close(to_cmd_pipe[0]);
close(from_cmd_pipe[1]);
ret->to_cmd = to_cmd_pipe[1];
ret->from_cmd = from_cmd_pipe[0];
if (!localproxy_by_fromfd)
localproxy_by_fromfd = newtree234(localproxy_fromfd_cmp);
if (!localproxy_by_tofd)
localproxy_by_tofd = newtree234(localproxy_tofd_cmp);
add234(localproxy_by_fromfd, ret);
add234(localproxy_by_tofd, ret);
uxsel_set(ret->from_cmd, 1, localproxy_select_result);
/* We are responsible for this and don't need it any more */
sk_addr_free(addr);
return (Socket) ret;
}
| gpl-2.0 |
kbridgers/VOLTE4GFAX | target/linux/generic-2.6/files/drivers/net/phy/ar8216.c | 9 | 19805 | /*
* ar8216.c: AR8216 switch driver
*
* Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/if.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/bitops.h>
#include <net/genetlink.h>
#include <linux/switch.h>
#include <linux/delay.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "ar8216.h"
/* size of the vlan table */
#define AR8X16_MAX_VLANS 128
#define AR8X16_PROBE_RETRIES 10
struct ar8216_priv {
struct switch_dev dev;
struct phy_device *phy;
u32 (*read)(struct ar8216_priv *priv, int reg);
void (*write)(struct ar8216_priv *priv, int reg, u32 val);
const struct net_device_ops *ndo_old;
struct net_device_ops ndo;
struct mutex reg_mutex;
int chip;
bool initialized;
bool port4_phy;
/* all fields below are cleared on reset */
bool vlan;
u16 vlan_id[AR8X16_MAX_VLANS];
u8 vlan_table[AR8X16_MAX_VLANS];
u8 vlan_tagged;
u16 pvid[AR8216_NUM_PORTS];
};
#define to_ar8216(_dev) container_of(_dev, struct ar8216_priv, dev)
static inline void
split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
regaddr >>= 1;
*r1 = regaddr & 0x1e;
regaddr >>= 5;
*r2 = regaddr & 0x7;
regaddr >>= 3;
*page = regaddr & 0x1ff;
}
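/*
 * Illustrative sketch, not part of the original file: a 32-bit switch
 * register address is sliced into a page (selected by writing PHY 0x18,
 * register 0), a pseudo-PHY address (0x10 | r2) and a low/high register
 * pair (r1, r1 + 1), which is how ar8216_mii_read()/_write() below use it.
 * Worked example, guarded out of the build:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x104;	/* e.g. a port control register */
	unsigned int regaddr = reg >> 1;
	unsigned int r1 = regaddr & 0x1e;		/* 0x02 */
	unsigned int r2 = (regaddr >> 5) & 0x7;		/* 0x04 */
	unsigned int page = (regaddr >> 8) & 0x1ff;	/* 0x00 */

	printf("reg %#x -> page %#x, phy %#x, regs %#x/%#x\n",
	       reg, page, 0x10 | r2, r1, r1 + 1);
	return 0;
}
#endif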
static u32
ar8216_mii_read(struct ar8216_priv *priv, int reg)
{
struct phy_device *phy = priv->phy;
u16 r1, r2, page;
u16 lo, hi;
split_addr((u32) reg, &r1, &r2, &page);
phy->bus->write(phy->bus, 0x18, 0, page);
msleep(1); /* wait for the page switch to propagate */
lo = phy->bus->read(phy->bus, 0x10 | r2, r1);
hi = phy->bus->read(phy->bus, 0x10 | r2, r1 + 1);
return (hi << 16) | lo;
}
static void
ar8216_mii_write(struct ar8216_priv *priv, int reg, u32 val)
{
struct phy_device *phy = priv->phy;
u16 r1, r2, r3;
u16 lo, hi;
split_addr((u32) reg, &r1, &r2, &r3);
phy->bus->write(phy->bus, 0x18, 0, r3);
msleep(1); /* wait for the page switch to propagate */
lo = val & 0xffff;
hi = (u16) (val >> 16);
phy->bus->write(phy->bus, 0x10 | r2, r1 + 1, hi);
phy->bus->write(phy->bus, 0x10 | r2, r1, lo);
}
static u32
ar8216_rmw(struct ar8216_priv *priv, int reg, u32 mask, u32 val)
{
u32 v;
v = priv->read(priv, reg);
v &= ~mask;
v |= val;
priv->write(priv, reg, v);
return v;
}
static inline int
ar8216_id_chip(struct ar8216_priv *priv)
{
u32 val;
u16 id;
int i;
val = ar8216_mii_read(priv, AR8216_REG_CTRL);
if (val == ~0)
return UNKNOWN;
id = val & (AR8216_CTRL_REVISION | AR8216_CTRL_VERSION);
for (i = 0; i < AR8X16_PROBE_RETRIES; i++) {
u16 t;
val = ar8216_mii_read(priv, AR8216_REG_CTRL);
if (val == ~0)
return UNKNOWN;
t = val & (AR8216_CTRL_REVISION | AR8216_CTRL_VERSION);
if (t != id)
return UNKNOWN;
}
switch (id) {
case 0x0101:
return AR8216;
case 0x1000:
case 0x1001:
return AR8316;
default:
printk(KERN_DEBUG
"ar8216: Unknown Atheros device [ver=%d, rev=%d, phy_id=%04x%04x]\n",
(int)(id >> AR8216_CTRL_VERSION_S),
(int)(id & AR8216_CTRL_REVISION),
priv->phy->bus->read(priv->phy->bus, priv->phy->addr, 2),
priv->phy->bus->read(priv->phy->bus, priv->phy->addr, 3));
return UNKNOWN;
}
}
static int
ar8216_set_vlan(struct switch_dev *dev, const struct switch_attr *attr,
struct switch_val *val)
{
struct ar8216_priv *priv = to_ar8216(dev);
priv->vlan = !!val->value.i;
return 0;
}
static int
ar8216_get_vlan(struct switch_dev *dev, const struct switch_attr *attr,
struct switch_val *val)
{
struct ar8216_priv *priv = to_ar8216(dev);
val->value.i = priv->vlan;
return 0;
}
static int
ar8216_set_pvid(struct switch_dev *dev, int port, int vlan)
{
struct ar8216_priv *priv = to_ar8216(dev);
/* make sure no invalid PVIDs get set */
if (vlan >= dev->vlans)
return -EINVAL;
priv->pvid[port] = vlan;
return 0;
}
static int
ar8216_get_pvid(struct switch_dev *dev, int port, int *vlan)
{
struct ar8216_priv *priv = to_ar8216(dev);
*vlan = priv->pvid[port];
return 0;
}
static int
ar8216_set_vid(struct switch_dev *dev, const struct switch_attr *attr,
struct switch_val *val)
{
struct ar8216_priv *priv = to_ar8216(dev);
priv->vlan_id[val->port_vlan] = val->value.i;
return 0;
}
static int
ar8216_get_vid(struct switch_dev *dev, const struct switch_attr *attr,
struct switch_val *val)
{
struct ar8216_priv *priv = to_ar8216(dev);
val->value.i = priv->vlan_id[val->port_vlan];
return 0;
}
static int
ar8216_mangle_tx(struct sk_buff *skb, struct net_device *dev)
{
struct ar8216_priv *priv = dev->phy_ptr;
unsigned char *buf;
if (unlikely(!priv))
goto error;
if (!priv->vlan)
goto send;
if (unlikely(skb_headroom(skb) < 2)) {
if (pskb_expand_head(skb, 2, 0, GFP_ATOMIC) < 0)
goto error;
}
buf = skb_push(skb, 2);
buf[0] = 0x10;
buf[1] = 0x80;
send:
return priv->ndo_old->ndo_start_xmit(skb, dev);
error:
dev_kfree_skb_any(skb);
return 0;
}
static int
ar8216_mangle_rx(struct sk_buff *skb, int napi)
{
struct ar8216_priv *priv;
struct net_device *dev;
unsigned char *buf;
int port, vlan;
dev = skb->dev;
if (!dev)
goto error;
priv = dev->phy_ptr;
if (!priv)
goto error;
/* don't strip the header if vlan mode is disabled */
if (!priv->vlan)
goto recv;
/* strip header, get vlan id */
buf = skb->data;
skb_pull(skb, 2);
/* check for vlan header presence */
if ((buf[12 + 2] != 0x81) || (buf[13 + 2] != 0x00))
goto recv;
port = buf[0] & 0xf;
/* no need to fix up packets coming from a tagged source */
if (priv->vlan_tagged & (1 << port))
goto recv;
/* lookup port vid from local table, the switch passes an invalid vlan id */
vlan = priv->vlan_id[priv->pvid[port]];
buf[14 + 2] &= 0xf0;
buf[14 + 2] |= vlan >> 8;
buf[15 + 2] = vlan & 0xff;
recv:
skb->protocol = eth_type_trans(skb, skb->dev);
if (napi)
return netif_receive_skb(skb);
else
return netif_rx(skb);
error:
/* no vlan? eat the packet! */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
static int
ar8216_netif_rx(struct sk_buff *skb)
{
return ar8216_mangle_rx(skb, 0);
}
static int
ar8216_netif_receive_skb(struct sk_buff *skb)
{
return ar8216_mangle_rx(skb, 1);
}
static struct switch_attr ar8216_globals[] = {
{
.type = SWITCH_TYPE_INT,
.name = "enable_vlan",
.description = "Enable VLAN mode",
.set = ar8216_set_vlan,
.get = ar8216_get_vlan,
.max = 1
},
};
static struct switch_attr ar8216_port[] = {
};
static struct switch_attr ar8216_vlan[] = {
{
.type = SWITCH_TYPE_INT,
.name = "vid",
.description = "VLAN ID (0-4094)",
.set = ar8216_set_vid,
.get = ar8216_get_vid,
.max = 4094,
},
};
static int
ar8216_get_ports(struct switch_dev *dev, struct switch_val *val)
{
struct ar8216_priv *priv = to_ar8216(dev);
u8 ports = priv->vlan_table[val->port_vlan];
int i;
val->len = 0;
for (i = 0; i < AR8216_NUM_PORTS; i++) {
struct switch_port *p;
if (!(ports & (1 << i)))
continue;
p = &val->value.ports[val->len++];
p->id = i;
if (priv->vlan_tagged & (1 << i))
p->flags = (1 << SWITCH_PORT_FLAG_TAGGED);
else
p->flags = 0;
}
return 0;
}
static int
ar8216_set_ports(struct switch_dev *dev, struct switch_val *val)
{
struct ar8216_priv *priv = to_ar8216(dev);
u8 *vt = &priv->vlan_table[val->port_vlan];
int i, j;
*vt = 0;
for (i = 0; i < val->len; i++) {
struct switch_port *p = &val->value.ports[i];
if (p->flags & (1 << SWITCH_PORT_FLAG_TAGGED))
priv->vlan_tagged |= (1 << p->id);
else {
priv->vlan_tagged &= ~(1 << p->id);
priv->pvid[p->id] = val->port_vlan;
/* make sure that an untagged port does not
* appear in other vlans */
for (j = 0; j < AR8X16_MAX_VLANS; j++) {
if (j == val->port_vlan)
continue;
priv->vlan_table[j] &= ~(1 << p->id);
}
}
*vt |= 1 << p->id;
}
return 0;
}
static int
ar8216_wait_bit(struct ar8216_priv *priv, int reg, u32 mask, u32 val)
{
int timeout = 20;
while ((priv->read(priv, reg) & mask) != val) {
if (timeout-- <= 0) {
printk(KERN_ERR "ar8216: timeout waiting for operation to complete\n");
return 1;
}
}
return 0;
}
static void
ar8216_vtu_op(struct ar8216_priv *priv, u32 op, u32 val)
{
if (ar8216_wait_bit(priv, AR8216_REG_VTU, AR8216_VTU_ACTIVE, 0))
return;
if ((op & AR8216_VTU_OP) == AR8216_VTU_OP_LOAD) {
val &= AR8216_VTUDATA_MEMBER;
val |= AR8216_VTUDATA_VALID;
priv->write(priv, AR8216_REG_VTU_DATA, val);
}
op |= AR8216_VTU_ACTIVE;
priv->write(priv, AR8216_REG_VTU, op);
}
static int
ar8216_hw_apply(struct switch_dev *dev)
{
struct ar8216_priv *priv = to_ar8216(dev);
u8 portmask[AR8216_NUM_PORTS];
int i, j;
mutex_lock(&priv->reg_mutex);
/* flush all vlan translation unit entries */
ar8216_vtu_op(priv, AR8216_VTU_OP_FLUSH, 0);
memset(portmask, 0, sizeof(portmask));
if (priv->vlan) {
/* calculate the port destination masks and load vlans
* into the vlan translation unit */
for (j = 0; j < AR8X16_MAX_VLANS; j++) {
u8 vp = priv->vlan_table[j];
if (!vp)
continue;
for (i = 0; i < AR8216_NUM_PORTS; i++) {
u8 mask = (1 << i);
if (vp & mask)
portmask[i] |= vp & ~mask;
}
ar8216_vtu_op(priv,
AR8216_VTU_OP_LOAD |
(priv->vlan_id[j] << AR8216_VTU_VID_S),
priv->vlan_table[j]);
}
} else {
/* vlan disabled:
* isolate all ports, but connect them to the cpu port */
for (i = 0; i < AR8216_NUM_PORTS; i++) {
if (i == AR8216_PORT_CPU)
continue;
portmask[i] = 1 << AR8216_PORT_CPU;
portmask[AR8216_PORT_CPU] |= (1 << i);
}
}
/* update the port destination mask registers and tag settings */
for (i = 0; i < AR8216_NUM_PORTS; i++) {
int egress, ingress;
int pvid;
if (priv->vlan) {
pvid = priv->vlan_id[priv->pvid[i]];
} else {
pvid = i;
}
if (priv->vlan && (priv->vlan_tagged & (1 << i))) {
egress = AR8216_OUT_ADD_VLAN;
} else {
egress = AR8216_OUT_STRIP_VLAN;
}
if (priv->vlan) {
ingress = AR8216_IN_SECURE;
} else {
ingress = AR8216_IN_PORT_ONLY;
}
ar8216_rmw(priv, AR8216_REG_PORT_CTRL(i),
AR8216_PORT_CTRL_LEARN | AR8216_PORT_CTRL_VLAN_MODE |
AR8216_PORT_CTRL_SINGLE_VLAN | AR8216_PORT_CTRL_STATE |
AR8216_PORT_CTRL_HEADER | AR8216_PORT_CTRL_LEARN_LOCK,
AR8216_PORT_CTRL_LEARN |
(priv->vlan && i == AR8216_PORT_CPU && (priv->chip == AR8216) ?
AR8216_PORT_CTRL_HEADER : 0) |
(egress << AR8216_PORT_CTRL_VLAN_MODE_S) |
(AR8216_PORT_STATE_FORWARD << AR8216_PORT_CTRL_STATE_S));
ar8216_rmw(priv, AR8216_REG_PORT_VLAN(i),
AR8216_PORT_VLAN_DEST_PORTS | AR8216_PORT_VLAN_MODE |
AR8216_PORT_VLAN_DEFAULT_ID,
(portmask[i] << AR8216_PORT_VLAN_DEST_PORTS_S) |
(ingress << AR8216_PORT_VLAN_MODE_S) |
(pvid << AR8216_PORT_VLAN_DEFAULT_ID_S));
}
mutex_unlock(&priv->reg_mutex);
return 0;
}
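/*
 * Illustrative sketch, not part of the original file: for every VLAN, each
 * member port's destination mask is extended with all the other members of
 * that VLAN (vp with the port's own bit cleared), exactly as in the loop
 * above.  Worked example for a VLAN containing ports 1, 2 and 5, guarded
 * out of the build:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned char portmask[6] = { 0 };
	unsigned char vp = (1 << 1) | (1 << 2) | (1 << 5);	/* 0x26 */
	int i;

	for (i = 0; i < 6; i++) {
		unsigned char mask = 1 << i;
		if (vp & mask)
			portmask[i] |= vp & ~mask;
	}
	/* portmask[1] == 0x24, portmask[2] == 0x22, portmask[5] == 0x06 */
	for (i = 0; i < 6; i++)
		printf("port %d -> %#x\n", i, portmask[i]);
	return 0;
}
#endif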
static int
ar8316_hw_init(struct ar8216_priv *priv) {
int i;
u32 val, newval;
struct mii_bus *bus;
val = priv->read(priv, 0x8);
if (priv->phy->interface == PHY_INTERFACE_MODE_RGMII) {
if (priv->port4_phy) {
/* value taken from Ubiquiti RouterStation Pro */
newval = 0x81461bea;
printk(KERN_INFO "ar8316: Using port 4 as PHY\n");
} else {
newval = 0x01261be2;
printk(KERN_INFO "ar8316: Using port 4 as switch port\n");
}
} else if (priv->phy->interface == PHY_INTERFACE_MODE_GMII) {
/* value taken from AVM Fritz!Box 7390 sources */
newval = 0x010e5b71;
} else {
/* no known value for phy interface */
printk(KERN_ERR "ar8316: unsupported mii mode: %d.\n",
priv->phy->interface);
return -EINVAL;
}
if (val == newval)
goto out;
priv->write(priv, 0x8, newval);
/* standard atheros magic */
priv->write(priv, 0x38, 0xc000050e);
/* Initialize the ports */
bus = priv->phy->bus;
for (i = 0; i < 5; i++) {
if ((i == 4) && priv->port4_phy &&
priv->phy->interface == PHY_INTERFACE_MODE_RGMII) {
/* work around for phy4 rgmii mode */
bus->write(bus, i, MII_ATH_DBG_ADDR, 0x12);
bus->write(bus, i, MII_ATH_DBG_DATA, 0x480c);
/* rx delay */
bus->write(bus, i, MII_ATH_DBG_ADDR, 0x0);
bus->write(bus, i, MII_ATH_DBG_DATA, 0x824e);
/* tx delay */
bus->write(bus, i, MII_ATH_DBG_ADDR, 0x5);
bus->write(bus, i, MII_ATH_DBG_DATA, 0x3d47);
msleep(1000);
}
/* initialize the port itself */
bus->write(bus, i, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
bus->write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL);
bus->write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
msleep(1000);
}
out:
priv->initialized = true;
return 0;
}
static int
ar8216_reset_switch(struct switch_dev *dev)
{
struct ar8216_priv *priv = to_ar8216(dev);
int i;
mutex_lock(&priv->reg_mutex);
memset(&priv->vlan, 0, sizeof(struct ar8216_priv) -
offsetof(struct ar8216_priv, vlan));
for (i = 0; i < AR8X16_MAX_VLANS; i++) {
priv->vlan_id[i] = i;
}
for (i = 0; i < AR8216_NUM_PORTS; i++) {
/* Enable port learning and tx */
priv->write(priv, AR8216_REG_PORT_CTRL(i),
AR8216_PORT_CTRL_LEARN |
(4 << AR8216_PORT_CTRL_STATE_S));
priv->write(priv, AR8216_REG_PORT_VLAN(i), 0);
/* Configure all PHYs */
if (i == AR8216_PORT_CPU) {
priv->write(priv, AR8216_REG_PORT_STATUS(i),
AR8216_PORT_STATUS_LINK_UP |
((priv->chip == AR8316) ?
AR8216_PORT_SPEED_1000M : AR8216_PORT_SPEED_100M) |
AR8216_PORT_STATUS_TXMAC |
AR8216_PORT_STATUS_RXMAC |
((priv->chip == AR8316) ? AR8216_PORT_STATUS_RXFLOW : 0) |
((priv->chip == AR8316) ? AR8216_PORT_STATUS_TXFLOW : 0) |
AR8216_PORT_STATUS_DUPLEX);
} else {
priv->write(priv, AR8216_REG_PORT_STATUS(i),
AR8216_PORT_STATUS_LINK_AUTO);
}
}
/* XXX: undocumented magic from atheros, required! */
priv->write(priv, 0x38, 0xc000050e);
if (priv->chip == AR8216) {
ar8216_rmw(priv, AR8216_REG_GLOBAL_CTRL,
AR8216_GCTRL_MTU, 1518 + 8 + 2);
} else if (priv->chip == AR8316) {
/* enable jumbo frames */
ar8216_rmw(priv, AR8216_REG_GLOBAL_CTRL,
AR8316_GCTRL_MTU, 9018 + 8 + 2);
}
if (priv->chip == AR8316) {
/* enable cpu port to receive multicast and broadcast frames */
priv->write(priv, AR8216_REG_FLOOD_MASK, 0x003f003f);
}
mutex_unlock(&priv->reg_mutex);
return ar8216_hw_apply(dev);
}
static const struct switch_dev_ops ar8216_ops = {
.attr_global = {
.attr = ar8216_globals,
.n_attr = ARRAY_SIZE(ar8216_globals),
},
.attr_port = {
.attr = ar8216_port,
.n_attr = ARRAY_SIZE(ar8216_port),
},
.attr_vlan = {
.attr = ar8216_vlan,
.n_attr = ARRAY_SIZE(ar8216_vlan),
},
.get_port_pvid = ar8216_get_pvid,
.set_port_pvid = ar8216_set_pvid,
.get_vlan_ports = ar8216_get_ports,
.set_vlan_ports = ar8216_set_ports,
.apply_config = ar8216_hw_apply,
.reset_switch = ar8216_reset_switch,
};
static int
ar8216_config_init(struct phy_device *pdev)
{
struct ar8216_priv *priv = pdev->priv;
struct net_device *dev = pdev->attached_dev;
struct switch_dev *swdev;
int ret;
if (!priv) {
priv = kzalloc(sizeof(struct ar8216_priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
}
priv->phy = pdev;
priv->chip = ar8216_id_chip(priv);
if (pdev->addr != 0) {
if (priv->chip == AR8316) {
pdev->supported |= SUPPORTED_1000baseT_Full;
pdev->advertising |= ADVERTISED_1000baseT_Full;
/* check if we're attaching to the switch twice */
pdev = pdev->bus->phy_map[0];
if (!pdev) {
kfree(priv);
return 0;
}
/* switch device has not been initialized, reuse priv */
if (!pdev->priv) {
priv->port4_phy = true;
pdev->priv = priv;
return 0;
}
kfree(priv);
/* switch device has been initialized, reinit */
priv = pdev->priv;
priv->dev.ports = (AR8216_NUM_PORTS - 1);
priv->initialized = false;
priv->port4_phy = true;
ar8316_hw_init(priv);
return 0;
}
kfree(priv);
return 0;
}
printk(KERN_INFO "%s: AR%d switch driver attached.\n",
pdev->attached_dev->name, priv->chip);
pdev->supported = priv->chip == AR8316 ?
SUPPORTED_1000baseT_Full : SUPPORTED_100baseT_Full;
pdev->advertising = pdev->supported;
mutex_init(&priv->reg_mutex);
priv->read = ar8216_mii_read;
priv->write = ar8216_mii_write;
pdev->priv = priv;
swdev = &priv->dev;
swdev->cpu_port = AR8216_PORT_CPU;
swdev->ops = &ar8216_ops;
swdev->ports = AR8216_NUM_PORTS;
if (priv->chip == AR8316) {
swdev->name = "Atheros AR8316";
swdev->vlans = AR8X16_MAX_VLANS;
if (priv->port4_phy) {
/* port 5 connected to the other mac, therefore unusable */
swdev->ports = (AR8216_NUM_PORTS - 1);
}
} else {
swdev->name = "Atheros AR8216";
swdev->vlans = AR8216_NUM_VLANS;
}
if ((ret = register_switch(&priv->dev, pdev->attached_dev)) < 0) {
kfree(priv);
goto done;
}
if (priv->chip == AR8316) {
ret = ar8316_hw_init(priv);
if (ret) {
kfree(priv);
goto done;
}
}
ret = ar8216_reset_switch(&priv->dev);
if (ret) {
kfree(priv);
goto done;
}
dev->phy_ptr = priv;
/* VID fixup only needed on ar8216 */
if (pdev->addr == 0 && priv->chip == AR8216) {
pdev->pkt_align = 2;
pdev->netif_receive_skb = ar8216_netif_receive_skb;
pdev->netif_rx = ar8216_netif_rx;
priv->ndo_old = dev->netdev_ops;
memcpy(&priv->ndo, priv->ndo_old, sizeof(struct net_device_ops));
priv->ndo.ndo_start_xmit = ar8216_mangle_tx;
dev->netdev_ops = &priv->ndo;
}
done:
return ret;
}
static int
ar8216_read_status(struct phy_device *phydev)
{
struct ar8216_priv *priv = phydev->priv;
int ret;
if (phydev->addr != 0) {
return genphy_read_status(phydev);
}
phydev->speed = priv->chip == AR8316 ? SPEED_1000 : SPEED_100;
phydev->duplex = DUPLEX_FULL;
phydev->link = 1;
/* flush the address translation unit */
mutex_lock(&priv->reg_mutex);
ret = ar8216_wait_bit(priv, AR8216_REG_ATU, AR8216_ATU_ACTIVE, 0);
if (!ret)
priv->write(priv, AR8216_REG_ATU, AR8216_ATU_OP_FLUSH);
else
ret = -ETIMEDOUT;
mutex_unlock(&priv->reg_mutex);
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
return ret;
}
static int
ar8216_config_aneg(struct phy_device *phydev)
{
if (phydev->addr == 0)
return 0;
return genphy_config_aneg(phydev);
}
static int
ar8216_probe(struct phy_device *pdev)
{
struct ar8216_priv priv;
u16 chip;
priv.phy = pdev;
chip = ar8216_id_chip(&priv);
if (chip == UNKNOWN)
return -ENODEV;
return 0;
}
static void
ar8216_remove(struct phy_device *pdev)
{
struct ar8216_priv *priv = pdev->priv;
struct net_device *dev = pdev->attached_dev;
if (!priv)
return;
if (priv->ndo_old && dev)
dev->netdev_ops = priv->ndo_old;
if (pdev->addr == 0)
unregister_switch(&priv->dev);
kfree(priv);
}
static struct phy_driver ar8216_driver = {
.phy_id = 0x004d0000,
.name = "Atheros AR8216/AR8316",
.phy_id_mask = 0xffff0000,
.features = PHY_BASIC_FEATURES,
.probe = ar8216_probe,
.remove = ar8216_remove,
.config_init = &ar8216_config_init,
.config_aneg = &ar8216_config_aneg,
.read_status = &ar8216_read_status,
.driver = { .owner = THIS_MODULE },
};
int __init
ar8216_init(void)
{
return phy_driver_register(&ar8216_driver);
}
void __exit
ar8216_exit(void)
{
phy_driver_unregister(&ar8216_driver);
}
module_init(ar8216_init);
module_exit(ar8216_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
evolver56k/xpenology | drivers/net/wireless/rtlwifi/rtl8192se/led.c | 9 | 4149 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../pci.h"
#include "reg.h"
#include "led.h"
static void _rtl92se_init_led(struct ieee80211_hw *hw,
struct rtl_led *pled, enum rtl_led_pin ledpin)
{
pled->hw = hw;
pled->ledpin = ledpin;
pled->ledon = false;
}
void rtl92se_init_sw_leds(struct ieee80211_hw *hw)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
_rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
_rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
}
void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
{
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
break;
case LED_PIN_LED0:
rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0xf0);
break;
case LED_PIN_LED1:
rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0x0f);
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
break;
}
pled->ledon = true;
}
void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
{
struct rtl_priv *rtlpriv;
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 ledcfg;
rtlpriv = rtl_priv(hw);
if (!rtlpriv || rtlpriv->max_fw_size)
return;
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
if (pcipriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(1)));
else
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
break;
case LED_PIN_LED1:
ledcfg &= 0x0f;
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
break;
}
pled->ledon = false;
}
static void _rtl92se_sw_led_control(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction)
{
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
switch (ledaction) {
case LED_CTL_POWER_ON:
case LED_CTL_LINK:
case LED_CTL_NO_LINK:
rtl92se_sw_led_on(hw, pLed0);
break;
case LED_CTL_POWER_OFF:
rtl92se_sw_led_off(hw, pLed0);
break;
default:
break;
}
}
void rtl92se_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
(ledaction == LED_CTL_TX ||
ledaction == LED_CTL_RX ||
ledaction == LED_CTL_SITE_SURVEY ||
ledaction == LED_CTL_LINK ||
ledaction == LED_CTL_NO_LINK ||
ledaction == LED_CTL_START_TO_LINK ||
ledaction == LED_CTL_POWER_ON)) {
return;
}
RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
_rtl92se_sw_led_control(hw, ledaction);
}
| gpl-2.0 |
pipcet/gcc | libgfortran/intrinsics/exit.c | 9 | 1502 | /* Implementation of the EXIT intrinsic.
Copyright (C) 2004-2017 Free Software Foundation, Inc.
Contributed by Steven G. Kargl <kargls@comcast.net>.
This file is part of the GNU Fortran runtime library (libgfortran).
Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "libgfortran.h"
/* SUBROUTINE EXIT(STATUS)
INTEGER, INTENT(IN), OPTIONAL :: STATUS */
extern void exit_i4 (GFC_INTEGER_4 *);
export_proto(exit_i4);
void
exit_i4 (GFC_INTEGER_4 * status)
{
exit (status ? *status : 0);
}
extern void exit_i8 (GFC_INTEGER_8 *);
export_proto(exit_i8);
void
exit_i8 (GFC_INTEGER_8 * status)
{
exit (status ? *status : 0);
}
| gpl-2.0 |
crazy-canux/git | builtin/rev-list.c | 9 | 10369 | #include "cache.h"
#include "commit.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "pack.h"
#include "pack-bitmap.h"
#include "builtin.h"
#include "log-tree.h"
#include "graph.h"
#include "bisect.h"
static const char rev_list_usage[] =
"git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
" limiting output:\n"
" --max-count=<n>\n"
" --max-age=<epoch>\n"
" --min-age=<epoch>\n"
" --sparse\n"
" --no-merges\n"
" --min-parents=<n>\n"
" --no-min-parents\n"
" --max-parents=<n>\n"
" --no-max-parents\n"
" --remove-empty\n"
" --all\n"
" --branches\n"
" --tags\n"
" --remotes\n"
" --stdin\n"
" --quiet\n"
" ordering output:\n"
" --topo-order\n"
" --date-order\n"
" --reverse\n"
" formatting output:\n"
" --parents\n"
" --children\n"
" --objects | --objects-edge\n"
" --unpacked\n"
" --header | --pretty\n"
" --abbrev=<n> | --no-abbrev\n"
" --abbrev-commit\n"
" --left-right\n"
" --count\n"
" special purpose:\n"
" --bisect\n"
" --bisect-vars\n"
" --bisect-all"
;
static void finish_commit(struct commit *commit, void *data);
static void show_commit(struct commit *commit, void *data)
{
struct rev_list_info *info = data;
struct rev_info *revs = info->revs;
if (info->flags & REV_LIST_QUIET) {
finish_commit(commit, data);
return;
}
graph_show_commit(revs->graph);
if (revs->count) {
if (commit->object.flags & PATCHSAME)
revs->count_same++;
else if (commit->object.flags & SYMMETRIC_LEFT)
revs->count_left++;
else
revs->count_right++;
finish_commit(commit, data);
return;
}
if (info->show_timestamp)
printf("%lu ", commit->date);
if (info->header_prefix)
fputs(info->header_prefix, stdout);
if (!revs->graph)
fputs(get_revision_mark(revs, commit), stdout);
if (revs->abbrev_commit && revs->abbrev)
fputs(find_unique_abbrev(commit->object.oid.hash, revs->abbrev),
stdout);
else
fputs(oid_to_hex(&commit->object.oid), stdout);
if (revs->print_parents) {
struct commit_list *parents = commit->parents;
while (parents) {
printf(" %s", oid_to_hex(&parents->item->object.oid));
parents = parents->next;
}
}
if (revs->children.name) {
struct commit_list *children;
children = lookup_decoration(&revs->children, &commit->object);
while (children) {
printf(" %s", oid_to_hex(&children->item->object.oid));
children = children->next;
}
}
show_decorations(revs, commit);
if (revs->commit_format == CMIT_FMT_ONELINE)
putchar(' ');
else
putchar('\n');
if (revs->verbose_header && get_cached_commit_buffer(commit, NULL)) {
struct strbuf buf = STRBUF_INIT;
struct pretty_print_context ctx = {0};
ctx.abbrev = revs->abbrev;
ctx.date_mode = revs->date_mode;
ctx.date_mode_explicit = revs->date_mode_explicit;
ctx.fmt = revs->commit_format;
ctx.output_encoding = get_log_output_encoding();
pretty_print_commit(&ctx, commit, &buf);
if (revs->graph) {
if (buf.len) {
if (revs->commit_format != CMIT_FMT_ONELINE)
graph_show_oneline(revs->graph);
graph_show_commit_msg(revs->graph, &buf);
/*
* Add a newline after the commit message.
*
* Usually, this newline produces a blank
* padding line between entries, in which case
* we need to add graph padding on this line.
*
* However, the commit message may not end in a
* newline. In this case the newline simply
* ends the last line of the commit message,
* and we don't need any graph output. (This
* always happens with CMIT_FMT_ONELINE, and it
* happens with CMIT_FMT_USERFORMAT when the
* format doesn't explicitly end in a newline.)
*/
if (buf.len && buf.buf[buf.len - 1] == '\n')
graph_show_padding(revs->graph);
putchar('\n');
} else {
/*
* If the message buffer is empty, just show
* the rest of the graph output for this
* commit.
*/
if (graph_show_remainder(revs->graph))
putchar('\n');
if (revs->commit_format == CMIT_FMT_ONELINE)
putchar('\n');
}
} else {
if (revs->commit_format != CMIT_FMT_USERFORMAT ||
buf.len) {
fwrite(buf.buf, 1, buf.len, stdout);
putchar(info->hdr_termination);
}
}
strbuf_release(&buf);
} else {
if (graph_show_remainder(revs->graph))
putchar('\n');
}
maybe_flush_or_die(stdout, "stdout");
finish_commit(commit, data);
}
static void finish_commit(struct commit *commit, void *data)
{
if (commit->parents) {
free_commit_list(commit->parents);
commit->parents = NULL;
}
free_commit_buffer(commit);
}
static void finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid))
die("missing blob object '%s'", oid_to_hex(&obj->oid));
if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
parse_object(obj->oid.hash);
}
static void show_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
finish_object(obj, name, cb_data);
if (info->flags & REV_LIST_QUIET)
return;
show_object_with_name(stdout, obj, name);
}
static void show_edge(struct commit *commit)
{
printf("-%s\n", oid_to_hex(&commit->object.oid));
}
static void print_var_str(const char *var, const char *val)
{
printf("%s='%s'\n", var, val);
}
static void print_var_int(const char *var, int val)
{
printf("%s=%d\n", var, val);
}
static int show_bisect_vars(struct rev_list_info *info, int reaches, int all)
{
int cnt, flags = info->flags;
char hex[GIT_SHA1_HEXSZ + 1] = "";
struct commit_list *tried;
struct rev_info *revs = info->revs;
if (!revs->commits)
return 1;
revs->commits = filter_skipped(revs->commits, &tried,
flags & BISECT_SHOW_ALL,
NULL, NULL);
/*
* revs->commits can reach "reaches" commits among
* "all" commits. If it is good, then there are
* (all-reaches) commits left to be bisected.
* On the other hand, if it is bad, then the set
* to bisect is "reaches".
* A bisect set of size N has (N-1) commits further
* to test, as we already know one bad one.
*/
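/*
 * Worked example with hypothetical numbers: for all = 12 and reaches = 5,
 * a good result would leave 12 - 5 = 7 candidates and a bad result would
 * leave 5, so cnt below becomes the larger set (7) and bisect_nr is
 * reported as cnt - 1 = 6 commits still to test.
 */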
cnt = all - reaches;
if (cnt < reaches)
cnt = reaches;
if (revs->commits)
sha1_to_hex_r(hex, revs->commits->item->object.oid.hash);
if (flags & BISECT_SHOW_ALL) {
traverse_commit_list(revs, show_commit, show_object, info);
printf("------\n");
}
print_var_str("bisect_rev", hex);
print_var_int("bisect_nr", cnt - 1);
print_var_int("bisect_good", all - reaches - 1);
print_var_int("bisect_bad", reaches - 1);
print_var_int("bisect_all", all);
print_var_int("bisect_steps", estimate_bisect_steps(all));
return 0;
}
static int show_object_fast(
const unsigned char *sha1,
enum object_type type,
int exclude,
uint32_t name_hash,
struct packed_git *found_pack,
off_t found_offset)
{
fprintf(stdout, "%s\n", sha1_to_hex(sha1));
return 1;
}
int cmd_rev_list(int argc, const char **argv, const char *prefix)
{
struct rev_info revs;
struct rev_list_info info;
int i;
int bisect_list = 0;
int bisect_show_vars = 0;
int bisect_find_all = 0;
int use_bitmap_index = 0;
git_config(git_default_config, NULL);
init_revisions(&revs, prefix);
revs.abbrev = DEFAULT_ABBREV;
revs.commit_format = CMIT_FMT_UNSPECIFIED;
argc = setup_revisions(argc, argv, &revs, NULL);
memset(&info, 0, sizeof(info));
info.revs = &revs;
if (revs.bisect)
bisect_list = 1;
if (DIFF_OPT_TST(&revs.diffopt, QUICK))
info.flags |= REV_LIST_QUIET;
for (i = 1 ; i < argc; i++) {
const char *arg = argv[i];
if (!strcmp(arg, "--header")) {
revs.verbose_header = 1;
continue;
}
if (!strcmp(arg, "--timestamp")) {
info.show_timestamp = 1;
continue;
}
if (!strcmp(arg, "--bisect")) {
bisect_list = 1;
continue;
}
if (!strcmp(arg, "--bisect-all")) {
bisect_list = 1;
bisect_find_all = 1;
info.flags |= BISECT_SHOW_ALL;
revs.show_decorations = 1;
continue;
}
if (!strcmp(arg, "--bisect-vars")) {
bisect_list = 1;
bisect_show_vars = 1;
continue;
}
if (!strcmp(arg, "--use-bitmap-index")) {
use_bitmap_index = 1;
continue;
}
if (!strcmp(arg, "--test-bitmap")) {
test_bitmap_walk(&revs);
return 0;
}
usage(rev_list_usage);
}
if (revs.commit_format != CMIT_FMT_UNSPECIFIED) {
/* The command line has a --pretty */
info.hdr_termination = '\n';
if (revs.commit_format == CMIT_FMT_ONELINE)
info.header_prefix = "";
else
info.header_prefix = "commit ";
}
else if (revs.verbose_header)
/* Only --header was specified */
revs.commit_format = CMIT_FMT_RAW;
if ((!revs.commits &&
(!(revs.tag_objects || revs.tree_objects || revs.blob_objects) &&
!revs.pending.nr)) ||
revs.diff)
usage(rev_list_usage);
if (revs.show_notes)
die(_("rev-list does not support display of notes"));
save_commit_buffer = (revs.verbose_header ||
revs.grep_filter.pattern_list ||
revs.grep_filter.header_list);
if (bisect_list)
revs.limited = 1;
if (use_bitmap_index && !revs.prune) {
if (revs.count && !revs.left_right && !revs.cherry_mark) {
uint32_t commit_count;
if (!prepare_bitmap_walk(&revs)) {
count_bitmap_commit_list(&commit_count, NULL, NULL, NULL);
printf("%d\n", commit_count);
return 0;
}
} else if (revs.tag_objects && revs.tree_objects && revs.blob_objects) {
if (!prepare_bitmap_walk(&revs)) {
traverse_bitmap_commit_list(&show_object_fast);
return 0;
}
}
}
if (prepare_revision_walk(&revs))
die("revision walk setup failed");
if (revs.tree_objects)
mark_edges_uninteresting(&revs, show_edge);
if (bisect_list) {
int reaches = reaches, all = all;
revs.commits = find_bisection(revs.commits, &reaches, &all,
bisect_find_all);
if (bisect_show_vars)
return show_bisect_vars(&info, reaches, all);
}
traverse_commit_list(&revs, show_commit, show_object, &info);
if (revs.count) {
if (revs.left_right && revs.cherry_mark)
printf("%d\t%d\t%d\n", revs.count_left, revs.count_right, revs.count_same);
else if (revs.left_right)
printf("%d\t%d\n", revs.count_left, revs.count_right);
else if (revs.cherry_mark)
printf("%d\t%d\n", revs.count_left + revs.count_right, revs.count_same);
else
printf("%d\n", revs.count_left + revs.count_right);
}
return 0;
}
| gpl-2.0 |
iconia-dev/android_kernel_acer_t20-common | drivers/staging/rt2860/common/rtmp_mcu.c | 265 | 9527 | /*
*************************************************************************
* Ralink Tech Inc.
* 5F., No.36, Taiyuan St., Jhubei City,
* Hsinchu County 302,
* Taiwan, R.O.C.
*
* (c) Copyright 2002-2007, Ralink Technology, Inc.
*
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
*************************************************************************
Module Name:
rtmp_mcu.c
Abstract:
Miniport generic portion header file
Revision History:
Who When What
-------- ---------- ----------------------------------------------
*/
#include "../rt_config.h"
#include <linux/crc-ccitt.h>
#include <linux/firmware.h>
#ifdef RTMP_MAC_USB
#define FIRMWAREIMAGE_LENGTH 0x1000
#define FIRMWARE_2870_MIN_VERSION 12
#define FIRMWARE_2870_FILENAME "rt2870.bin"
MODULE_FIRMWARE(FIRMWARE_2870_FILENAME);
#define FIRMWARE_3070_MIN_VERSION 17
#define FIRMWARE_3070_FILENAME "rt3070.bin"
MODULE_FIRMWARE(FIRMWARE_3070_FILENAME);
#define FIRMWARE_3071_MIN_VERSION 17
#define FIRMWARE_3071_FILENAME "rt3071.bin" /* for RT3071/RT3072 */
MODULE_FIRMWARE(FIRMWARE_3071_FILENAME);
#else /* RTMP_MAC_PCI */
#define FIRMWAREIMAGE_LENGTH 0x2000
#define FIRMWARE_2860_MIN_VERSION 11
#define FIRMWARE_2860_FILENAME "rt2860.bin"
MODULE_FIRMWARE(FIRMWARE_2860_FILENAME);
#define FIRMWARE_3090_MIN_VERSION 19
#define FIRMWARE_3090_FILENAME "rt3090.bin" /* for RT3090/RT3390 */
MODULE_FIRMWARE(FIRMWARE_3090_FILENAME);
#endif
/*
========================================================================
Routine Description:
erase 8051 firmware image in MAC ASIC
Arguments:
Adapter Pointer to our adapter
IRQL = PASSIVE_LEVEL
========================================================================
*/
int RtmpAsicEraseFirmware(struct rt_rtmp_adapter *pAd)
{
unsigned long i;
for (i = 0; i < MAX_FIRMWARE_IMAGE_SIZE; i += 4)
RTMP_IO_WRITE32(pAd, FIRMWARE_IMAGE_BASE + i, 0);
return 0;
}
static const struct firmware *rtmp_get_firmware(struct rt_rtmp_adapter *adapter)
{
const char *name;
const struct firmware *fw = NULL;
u8 min_version;
struct device *dev;
int err;
if (adapter->firmware)
return adapter->firmware;
#ifdef RTMP_MAC_USB
if (IS_RT3071(adapter)) {
name = FIRMWARE_3071_FILENAME;
min_version = FIRMWARE_3071_MIN_VERSION;
} else if (IS_RT3070(adapter)) {
name = FIRMWARE_3070_FILENAME;
min_version = FIRMWARE_3070_MIN_VERSION;
} else {
name = FIRMWARE_2870_FILENAME;
min_version = FIRMWARE_2870_MIN_VERSION;
}
dev = &((struct os_cookie *)adapter->OS_Cookie)->pUsb_Dev->dev;
#else /* RTMP_MAC_PCI */
if (IS_RT3090(adapter) || IS_RT3390(adapter)) {
name = FIRMWARE_3090_FILENAME;
min_version = FIRMWARE_3090_MIN_VERSION;
} else {
name = FIRMWARE_2860_FILENAME;
min_version = FIRMWARE_2860_MIN_VERSION;
}
dev = &((struct os_cookie *)adapter->OS_Cookie)->pci_dev->dev;
#endif
err = request_firmware(&fw, name, dev);
if (err) {
dev_err(dev, "firmware file %s request failed (%d)\n",
name, err);
return NULL;
}
if (fw->size < FIRMWAREIMAGE_LENGTH) {
dev_err(dev, "firmware file %s size is invalid\n", name);
goto invalid;
}
/* is it new enough? */
adapter->FirmwareVersion = fw->data[FIRMWAREIMAGE_LENGTH - 3];
if (adapter->FirmwareVersion < min_version) {
dev_err(dev,
"firmware file %s is too old;"
" driver requires v%d or later\n",
name, min_version);
goto invalid;
}
/* is the internal CRC correct? */
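/* the expected CRC-CCITT value is computed over the first
   FIRMWAREIMAGE_LENGTH - 2 bytes and stored little-endian in the last
   two bytes of the image */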
if (crc_ccitt(0xffff, fw->data, FIRMWAREIMAGE_LENGTH - 2) !=
(fw->data[FIRMWAREIMAGE_LENGTH - 2] |
(fw->data[FIRMWAREIMAGE_LENGTH - 1] << 8))) {
dev_err(dev, "firmware file %s failed internal CRC\n", name);
goto invalid;
}
adapter->firmware = fw;
return fw;
invalid:
release_firmware(fw);
return NULL;
}
/*
========================================================================
Routine Description:
Load 8051 firmware file into MAC ASIC
Arguments:
Adapter Pointer to our adapter
Return Value:
NDIS_STATUS_SUCCESS firmware image load ok
NDIS_STATUS_FAILURE image not found
IRQL = PASSIVE_LEVEL
========================================================================
*/
int RtmpAsicLoadFirmware(struct rt_rtmp_adapter *pAd)
{
const struct firmware *fw;
int Status = NDIS_STATUS_SUCCESS;
unsigned long Index;
u32 MacReg = 0;
fw = rtmp_get_firmware(pAd);
if (!fw)
return NDIS_STATUS_FAILURE;
RTMP_WRITE_FIRMWARE(pAd, fw->data, FIRMWAREIMAGE_LENGTH);
/* check if MCU is ready */
Index = 0;
do {
RTMP_IO_READ32(pAd, PBF_SYS_CTRL, &MacReg);
if (MacReg & 0x80)
break;
RTMPusecDelay(1000);
} while (Index++ < 1000);
if (Index > 1000) {
DBGPRINT(RT_DEBUG_ERROR,
("NICLoadFirmware: MCU is not ready\n"));
Status = NDIS_STATUS_FAILURE;
}
DBGPRINT(RT_DEBUG_TRACE, ("<=== %s (status=%d)\n", __func__, Status));
return Status;
}
int RtmpAsicSendCommandToMcu(struct rt_rtmp_adapter *pAd,
u8 Command,
u8 Token, u8 Arg0, u8 Arg1)
{
HOST_CMD_CSR_STRUC H2MCmd;
H2M_MAILBOX_STRUC H2MMailbox;
unsigned long i = 0;
#ifdef PCIE_PS_SUPPORT
/* The 3090F power solution 3 has a hw limitation that requires banning all mcu commands */
/* while the firmware is in radio state; other chips don't have this limitation. */
if (((IS_RT3090(pAd) || IS_RT3572(pAd) || IS_RT3390(pAd))
&& IS_VERSION_AFTER_F(pAd)) && IS_VERSION_AFTER_F(pAd)
&& (pAd->StaCfg.PSControl.field.rt30xxPowerMode == 3)
&& (pAd->StaCfg.PSControl.field.EnableNewPS == TRUE)) {
RTMP_SEM_LOCK(&pAd->McuCmdLock);
if ((pAd->brt30xxBanMcuCmd == TRUE)
&& (Command != WAKE_MCU_CMD) && (Command != RFOFF_MCU_CMD)) {
RTMP_SEM_UNLOCK(&pAd->McuCmdLock);
DBGPRINT(RT_DEBUG_TRACE,
(" Ban Mcu Cmd %x in sleep mode\n", Command));
return FALSE;
} else if ((Command == SLEEP_MCU_CMD)
|| (Command == RFOFF_MCU_CMD)) {
pAd->brt30xxBanMcuCmd = TRUE;
} else if (Command != WAKE_MCU_CMD) {
pAd->brt30xxBanMcuCmd = FALSE;
}
RTMP_SEM_UNLOCK(&pAd->McuCmdLock);
}
if (((IS_RT3090(pAd) || IS_RT3572(pAd) || IS_RT3390(pAd))
&& IS_VERSION_AFTER_F(pAd)) && IS_VERSION_AFTER_F(pAd)
&& (pAd->StaCfg.PSControl.field.rt30xxPowerMode == 3)
&& (pAd->StaCfg.PSControl.field.EnableNewPS == TRUE)
&& (Command == WAKE_MCU_CMD)) {
do {
RTMP_IO_FORCE_READ32(pAd, H2M_MAILBOX_CSR,
&H2MMailbox.word);
if (H2MMailbox.field.Owner == 0)
break;
RTMPusecDelay(2);
DBGPRINT(RT_DEBUG_INFO,
("AsicSendCommanToMcu::Mail box is busy\n"));
} while (i++ < 100);
if (i > 100) {
DBGPRINT_ERR("H2M_MAILBOX still hold by MCU. command fail\n");
return FALSE;
}
H2MMailbox.field.Owner = 1; /* pass ownership to MCU */
H2MMailbox.field.CmdToken = Token;
H2MMailbox.field.HighByte = Arg1;
H2MMailbox.field.LowByte = Arg0;
RTMP_IO_FORCE_WRITE32(pAd, H2M_MAILBOX_CSR, H2MMailbox.word);
H2MCmd.word = 0;
H2MCmd.field.HostCommand = Command;
RTMP_IO_FORCE_WRITE32(pAd, HOST_CMD_CSR, H2MCmd.word);
} else
#endif /* PCIE_PS_SUPPORT // */
{
do {
RTMP_IO_READ32(pAd, H2M_MAILBOX_CSR, &H2MMailbox.word);
if (H2MMailbox.field.Owner == 0)
break;
RTMPusecDelay(2);
} while (i++ < 100);
if (i > 100) {
#ifdef RTMP_MAC_PCI
#endif /* RTMP_MAC_PCI // */
{
DBGPRINT_ERR("H2M_MAILBOX still hold by MCU. command fail\n");
}
return FALSE;
}
#ifdef RTMP_MAC_PCI
#endif /* RTMP_MAC_PCI // */
H2MMailbox.field.Owner = 1; /* pass ownership to MCU */
H2MMailbox.field.CmdToken = Token;
H2MMailbox.field.HighByte = Arg1;
H2MMailbox.field.LowByte = Arg0;
RTMP_IO_WRITE32(pAd, H2M_MAILBOX_CSR, H2MMailbox.word);
H2MCmd.word = 0;
H2MCmd.field.HostCommand = Command;
RTMP_IO_WRITE32(pAd, HOST_CMD_CSR, H2MCmd.word);
if (Command != 0x80) {
}
}
#ifdef PCIE_PS_SUPPORT
/* The 3090 MCU wakeup command needs more time to become stable. */
/* Until it is stable, don't issue other MCU commands, to prevent firmware errors. */
if (((IS_RT3090(pAd) || IS_RT3572(pAd) || IS_RT3390(pAd))
&& IS_VERSION_AFTER_F(pAd)) && IS_VERSION_AFTER_F(pAd)
&& (pAd->StaCfg.PSControl.field.rt30xxPowerMode == 3)
&& (pAd->StaCfg.PSControl.field.EnableNewPS == TRUE)
&& (Command == WAKE_MCU_CMD)) {
RTMPusecDelay(2000);
/* Put this after RF programming. */
/*NdisAcquireSpinLock(&pAd->McuCmdLock); */
/*pAd->brt30xxBanMcuCmd = FALSE; */
/*NdisReleaseSpinLock(&pAd->McuCmdLock); */
}
#endif /* PCIE_PS_SUPPORT // */
return TRUE;
}
| gpl-2.0 |
clearwa/mypi | drivers/input/misc/da9055_onkey.c | 1545 | 4031 | /*
* ON pin driver for Dialog DA9055 PMICs
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>
struct da9055_onkey {
struct da9055 *da9055;
struct input_dev *input;
struct delayed_work work;
};
static void da9055_onkey_query(struct da9055_onkey *onkey)
{
int key_stat;
key_stat = da9055_reg_read(onkey->da9055, DA9055_REG_STATUS_A);
if (key_stat < 0) {
dev_err(onkey->da9055->dev,
"Failed to read onkey event %d\n", key_stat);
} else {
key_stat &= DA9055_NOKEY_STS;
/*
* Onkey status bit is cleared when onkey button is released.
*/
if (!key_stat) {
input_report_key(onkey->input, KEY_POWER, 0);
input_sync(onkey->input);
}
}
/*
* Interrupt is generated only when the ONKEY pin is asserted.
* Hence the deassertion of the pin is simulated through work queue.
*/
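/* While the ONKEY status bit stays set, the delayed work below re-runs
 * this query roughly every 10 ms; the release is reported above once the
 * bit clears. */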
if (key_stat)
schedule_delayed_work(&onkey->work, msecs_to_jiffies(10));
}
static void da9055_onkey_work(struct work_struct *work)
{
struct da9055_onkey *onkey = container_of(work, struct da9055_onkey,
work.work);
da9055_onkey_query(onkey);
}
static irqreturn_t da9055_onkey_irq(int irq, void *data)
{
struct da9055_onkey *onkey = data;
input_report_key(onkey->input, KEY_POWER, 1);
input_sync(onkey->input);
da9055_onkey_query(onkey);
return IRQ_HANDLED;
}
static int da9055_onkey_probe(struct platform_device *pdev)
{
struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
struct da9055_onkey *onkey;
struct input_dev *input_dev;
int irq, err;
irq = platform_get_irq_byname(pdev, "ONKEY");
if (irq < 0) {
dev_err(&pdev->dev,
"Failed to get an IRQ for input device, %d\n", irq);
return -EINVAL;
}
onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL);
if (!onkey) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
return -ENOMEM;
}
input_dev = input_allocate_device();
if (!input_dev) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
return -ENOMEM;
}
onkey->input = input_dev;
onkey->da9055 = da9055;
input_dev->name = "da9055-onkey";
input_dev->phys = "da9055-onkey/input0";
input_dev->dev.parent = &pdev->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY);
__set_bit(KEY_POWER, input_dev->keybit);
INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"ONKEY", onkey);
if (err < 0) {
dev_err(&pdev->dev,
"Failed to register ONKEY IRQ %d, error = %d\n",
irq, err);
goto err_free_input;
}
err = input_register_device(input_dev);
if (err) {
dev_err(&pdev->dev, "Unable to register input device, %d\n",
err);
goto err_free_irq;
}
platform_set_drvdata(pdev, onkey);
return 0;
err_free_irq:
free_irq(irq, onkey);
cancel_delayed_work_sync(&onkey->work);
err_free_input:
input_free_device(input_dev);
return err;
}
static int da9055_onkey_remove(struct platform_device *pdev)
{
struct da9055_onkey *onkey = platform_get_drvdata(pdev);
int irq = platform_get_irq_byname(pdev, "ONKEY");
irq = regmap_irq_get_virq(onkey->da9055->irq_data, irq);
free_irq(irq, onkey);
cancel_delayed_work_sync(&onkey->work);
input_unregister_device(onkey->input);
return 0;
}
static struct platform_driver da9055_onkey_driver = {
.probe = da9055_onkey_probe,
.remove = da9055_onkey_remove,
.driver = {
.name = "da9055-onkey",
},
};
module_platform_driver(da9055_onkey_driver);
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
MODULE_DESCRIPTION("Onkey driver for DA9055");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9055-onkey");
| gpl-2.0 |
TheTypoMaster/android_kernel_motoe | drivers/media/rc/ene_ir.c | 2057 | 31827 | /*
* driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX)
*
* Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* Special thanks to:
* Sami R. <maesesami@gmail.com> for lot of help in debugging and therefore
* bringing to life support for transmission & learning mode.
*
* Charlie Andrews <charliethepilot@googlemail.com> for lots of help in
* bringing up the support of new firmware buffer that is popular
* on latest notebooks
*
* ENE for partial device documentation
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include "ene_ir.h"
static int sample_period;
static bool learning_mode_force;
static int debug;
static bool txsim;
static void ene_set_reg_addr(struct ene_device *dev, u16 reg)
{
outb(reg >> 8, dev->hw_io + ENE_ADDR_HI);
outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO);
}
/* read a hardware register */
static u8 ene_read_reg(struct ene_device *dev, u16 reg)
{
u8 retval;
ene_set_reg_addr(dev, reg);
retval = inb(dev->hw_io + ENE_IO);
dbg_regs("reg %04x == %02x", reg, retval);
return retval;
}
/* write a hardware register */
static void ene_write_reg(struct ene_device *dev, u16 reg, u8 value)
{
dbg_regs("reg %04x <- %02x", reg, value);
ene_set_reg_addr(dev, reg);
outb(value, dev->hw_io + ENE_IO);
}
/* Set bits in hardware register */
static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
{
dbg_regs("reg %04x |= %02x", reg, mask);
ene_set_reg_addr(dev, reg);
outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO);
}
/* Clear bits in hardware register */
static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask)
{
dbg_regs("reg %04x &= ~%02x ", reg, mask);
ene_set_reg_addr(dev, reg);
outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO);
}
/* A helper to set/clear a bit in register according to boolean variable */
static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask,
bool set)
{
if (set)
ene_set_reg_mask(dev, reg, mask);
else
ene_clear_reg_mask(dev, reg, mask);
}
/* detect hardware features */
static int ene_hw_detect(struct ene_device *dev)
{
u8 chip_major, chip_minor;
u8 hw_revision, old_ver;
u8 fw_reg2, fw_reg1;
ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR);
chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR);
ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD);
hw_revision = ene_read_reg(dev, ENE_ECHV);
old_ver = ene_read_reg(dev, ENE_HW_VER_OLD);
dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) +
(ene_read_reg(dev, ENE_PLLFRL) >> 4);
if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD)
dev->rx_period_adjust =
dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 2 : 4;
if (hw_revision == 0xFF) {
pr_warn("device seems to be disabled\n");
pr_warn("send a mail to lirc-list@lists.sourceforge.net\n");
pr_warn("please attach output of acpidump and dmidecode\n");
return -ENODEV;
}
pr_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n",
chip_major, chip_minor, old_ver, hw_revision);
pr_notice("PLL freq = %d\n", dev->pll_freq);
if (chip_major == 0x33) {
pr_warn("chips 0x33xx aren't supported\n");
return -ENODEV;
}
if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_C;
pr_notice("KB3926C detected\n");
} else if (old_ver == 0x24 && hw_revision == 0xC0) {
dev->hw_revision = ENE_HW_B;
pr_notice("KB3926B detected\n");
} else {
dev->hw_revision = ENE_HW_D;
pr_notice("KB3926D or higher detected\n");
}
/* detect features hardware supports */
if (dev->hw_revision < ENE_HW_C)
return 0;
fw_reg1 = ene_read_reg(dev, ENE_FW1);
fw_reg2 = ene_read_reg(dev, ENE_FW2);
pr_notice("Firmware regs: %02x %02x\n", fw_reg1, fw_reg2);
dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A);
dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING);
dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF);
if (dev->hw_learning_and_tx_capable)
dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT);
pr_notice("Hardware features:\n");
if (dev->hw_learning_and_tx_capable) {
pr_notice("* Supports transmitting & learning mode\n");
pr_notice(" This feature is rare and therefore,\n");
pr_notice(" you are welcome to test it,\n");
pr_notice(" and/or contact the author via:\n");
pr_notice(" lirc-list@lists.sourceforge.net\n");
pr_notice(" or maximlevitsky@gmail.com\n");
pr_notice("* Uses GPIO %s for IR raw input\n",
dev->hw_use_gpio_0a ? "40" : "0A");
if (dev->hw_fan_input)
pr_notice("* Uses unused fan feedback input as source of demodulated IR data\n");
}
if (!dev->hw_fan_input)
pr_notice("* Uses GPIO %s for IR demodulated input\n",
dev->hw_use_gpio_0a ? "0A" : "40");
if (dev->hw_extra_buffer)
pr_notice("* Uses new style input buffer\n");
return 0;
}
/* Read properties of the hw sample buffer */
static void ene_rx_setup_hw_buffer(struct ene_device *dev)
{
u16 tmp;
ene_rx_read_hw_pointer(dev);
dev->r_pointer = dev->w_pointer;
if (!dev->hw_extra_buffer) {
dev->buffer_len = ENE_FW_PACKET_SIZE * 2;
return;
}
tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER);
tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8;
dev->extra_buf1_address = tmp;
dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2);
tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3);
tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8;
dev->extra_buf2_address = tmp;
dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5);
dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8;
pr_notice("Hardware uses 2 extended buffers:\n");
pr_notice(" 0x%04x - len : %d\n",
dev->extra_buf1_address, dev->extra_buf1_len);
pr_notice(" 0x%04x - len : %d\n",
dev->extra_buf2_address, dev->extra_buf2_len);
pr_notice("Total buffer len = %d\n", dev->buffer_len);
if (dev->buffer_len > 64 || dev->buffer_len < 16)
goto error;
if (dev->extra_buf1_address > 0xFBFC ||
dev->extra_buf1_address < 0xEC00)
goto error;
if (dev->extra_buf2_address > 0xFBFC ||
dev->extra_buf2_address < 0xEC00)
goto error;
if (dev->r_pointer > dev->buffer_len)
goto error;
ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
return;
error:
pr_warn("Error validating extra buffers, device probably won't work\n");
dev->hw_extra_buffer = false;
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
/* Restore the pointers to extra buffers - to make module reload work*/
static void ene_rx_restore_hw_buffer(struct ene_device *dev)
{
if (!dev->hw_extra_buffer)
return;
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0,
dev->extra_buf1_address & 0xFF);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1,
dev->extra_buf1_address >> 8);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3,
dev->extra_buf2_address & 0xFF);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4,
dev->extra_buf2_address >> 8);
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5,
dev->extra_buf2_len);
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND);
}
/* Read hardware write pointer */
static void ene_rx_read_hw_pointer(struct ene_device *dev)
{
if (dev->hw_extra_buffer)
dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER);
else
dev->w_pointer = ene_read_reg(dev, ENE_FW2)
& ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE;
dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x",
dev->w_pointer, dev->r_pointer);
}
/* Gets address of next sample from HW ring buffer */
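/*
 * Note: the read/write pointers index a virtual buffer in which offsets
 * 0-7 map to ENE_FW_SAMPLE_BUFFER and the following extra_buf1_len and
 * extra_buf2_len offsets map to the two extended buffers discovered in
 * ene_rx_setup_hw_buffer(); the translation below follows that layout.
 */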
static int ene_rx_get_sample_reg(struct ene_device *dev)
{
int r_pointer;
if (dev->r_pointer == dev->w_pointer) {
dbg_verbose("RB: hit end, try update w_pointer");
ene_rx_read_hw_pointer(dev);
}
if (dev->r_pointer == dev->w_pointer) {
dbg_verbose("RB: end of data at %d", dev->r_pointer);
return 0;
}
dbg_verbose("RB: reading at offset %d", dev->r_pointer);
r_pointer = dev->r_pointer;
dev->r_pointer++;
if (dev->r_pointer == dev->buffer_len)
dev->r_pointer = 0;
dbg_verbose("RB: next read will be from offset %d", dev->r_pointer);
if (r_pointer < 8) {
dbg_verbose("RB: read at main buffer at %d", r_pointer);
return ENE_FW_SAMPLE_BUFFER + r_pointer;
}
r_pointer -= 8;
if (r_pointer < dev->extra_buf1_len) {
dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer);
return dev->extra_buf1_address + r_pointer;
}
r_pointer -= dev->extra_buf1_len;
if (r_pointer < dev->extra_buf2_len) {
dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer);
return dev->extra_buf2_address + r_pointer;
}
dbg("attempt to read beyond ring buffer end");
return 0;
}
/* Sense current received carrier */
void ene_rx_sense_carrier(struct ene_device *dev)
{
DEFINE_IR_RAW_EVENT(ev);
int carrier, duty_cycle;
int period = ene_read_reg(dev, ENE_CIRCAR_PRD);
int hperiod = ene_read_reg(dev, ENE_CIRCAR_HPRD);
if (!(period & ENE_CIRCAR_PRD_VALID))
return;
period &= ~ENE_CIRCAR_PRD_VALID;
if (!period)
return;
dbg("RX: hardware carrier period = %02x", period);
dbg("RX: hardware carrier pulse period = %02x", hperiod);
carrier = 2000000 / period;
duty_cycle = (hperiod * 100) / period;
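/* Illustrative example (hypothetical register values, after the validity
   bit is cleared): period = 50 and hperiod = 17 would give
   carrier = 2000000 / 50 = 40000 Hz and a duty cycle of
   17 * 100 / 50 = 34%. */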
dbg("RX: sensed carrier = %d Hz, duty cycle %d%%",
carrier, duty_cycle);
if (dev->carrier_detect_enabled) {
ev.carrier_report = true;
ev.carrier = carrier;
ev.duty_cycle = duty_cycle;
ir_raw_event_store(dev->rdev, &ev);
}
}
/* this enables/disables the CIR RX engine */
static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable)
{
ene_set_clear_reg_mask(dev, ENE_CIRCFG,
ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable);
}
/* this selects the input for the CIR engine: either GPIO 0A or GPIO 40 */
static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a)
{
ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a);
}
/*
* this enables alternative input via fan tachometer sensor and bypasses
* the hw CIR engine
*/
static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable)
{
if (!dev->hw_fan_input)
return;
if (!enable)
ene_write_reg(dev, ENE_FAN_AS_IN1, 0);
else {
ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN);
ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN);
}
}
/* setup the receiver for RX*/
static void ene_rx_setup(struct ene_device *dev)
{
bool learning_mode = dev->learning_mode_enabled ||
dev->carrier_detect_enabled;
int sample_period_adjust = 0;
dbg("RX: setup receiver, learning mode = %d", learning_mode);
/* This selects RLC input and clears CFG2 settings */
ene_write_reg(dev, ENE_CIRCFG2, 0x00);
/* set sample period*/
if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD)
sample_period_adjust =
dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 1 : 2;
ene_write_reg(dev, ENE_CIRRLC_CFG,
(sample_period + sample_period_adjust) |
ENE_CIRRLC_CFG_OVERFLOW);
/* revB doesn't support inputs */
if (dev->hw_revision < ENE_HW_C)
goto select_timeout;
if (learning_mode) {
WARN_ON(!dev->hw_learning_and_tx_capable);
/* Enable the opposite of the normal input.
That means that if GPIO40 is normally used, use GPIO0A
and vice versa.
This input will carry a non-demodulated signal,
and we will tell the hw to demodulate it itself */
ene_rx_select_input(dev, !dev->hw_use_gpio_0a);
dev->rx_fan_input_inuse = false;
/* Enable carrier demodulation */
ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
/* Enable carrier detection */
ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63);
ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT,
dev->carrier_detect_enabled || debug);
} else {
if (dev->hw_fan_input)
dev->rx_fan_input_inuse = true;
else
ene_rx_select_input(dev, dev->hw_use_gpio_0a);
/* Disable carrier detection & demodulation */
ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD);
ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT);
}
select_timeout:
if (dev->rx_fan_input_inuse) {
dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
/* Fan input doesn't support timeouts, it just ends the
input with a maximum sample */
dev->rdev->min_timeout = dev->rdev->max_timeout =
US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
ENE_FW_SAMPLE_PERIOD_FAN);
} else {
dev->rdev->rx_resolution = US_TO_NS(sample_period);
/* Theoretically the timeout is unlimited, but we cap it
* because it was seen that on one device, it
* would stop sending spaces after around 250 msec.
* Besides, this is close to 2^32 anyway and timeout is u32.
*/
dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
dev->rdev->max_timeout = US_TO_NS(200000);
}
if (dev->hw_learning_and_tx_capable)
dev->rdev->tx_resolution = US_TO_NS(sample_period);
if (dev->rdev->timeout > dev->rdev->max_timeout)
dev->rdev->timeout = dev->rdev->max_timeout;
if (dev->rdev->timeout < dev->rdev->min_timeout)
dev->rdev->timeout = dev->rdev->min_timeout;
}
/* Enable the device for receive */
static void ene_rx_enable(struct ene_device *dev)
{
u8 reg_value;
/* Enable system interrupt */
if (dev->hw_revision < ENE_HW_C) {
ene_write_reg(dev, ENEB_IRQ, dev->irq << 1);
ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01);
} else {
reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0;
reg_value |= ENE_IRQ_UNK_EN;
reg_value &= ~ENE_IRQ_STATUS;
reg_value |= (dev->irq & ENE_IRQ_MASK);
ene_write_reg(dev, ENE_IRQ, reg_value);
}
/* Enable inputs */
ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse);
ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse);
/* ack any pending irqs - just in case */
ene_irq_status(dev);
/* enable firmware bits */
ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
/* enter idle mode */
ir_raw_event_set_idle(dev->rdev, true);
dev->rx_enabled = true;
}
/* Disable the device receiver */
static void ene_rx_disable(struct ene_device *dev)
{
/* disable inputs */
ene_rx_enable_cir_engine(dev, false);
ene_rx_enable_fan_input(dev, false);
/* disable hardware IRQ and firmware flag */
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
ir_raw_event_set_idle(dev->rdev, true);
dev->rx_enabled = false;
}
/* This resets the receiver. Useful to stop a stream of spaces at the end
 * of a transmission
*/
static void ene_rx_reset(struct ene_device *dev)
{
ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN);
}
/* Set up the TX carrier frequency and duty cycle */
static void ene_tx_set_carrier(struct ene_device *dev)
{
u8 tx_puls_width;
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_set_clear_reg_mask(dev, ENE_CIRCFG,
ENE_CIRCFG_TX_CARR, dev->tx_period > 0);
if (!dev->tx_period)
goto unlock;
BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0);
tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle);
if (!tx_puls_width)
tx_puls_width = 1;
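/* For example, with the defaults from ene_setup_default_settings()
   (tx_period = 32, tx_duty_cycle = 50) this gives
   tx_puls_width = 32 / (100 / 50) = 16, i.e. half the period. */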
dbg("TX: pulse distance = %d * 500 ns", dev->tx_period);
dbg("TX: pulse width = %d * 500 ns", tx_puls_width);
ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL);
ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width);
unlock:
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
/* Enable/disable transmitters */
static void ene_tx_set_transmitters(struct ene_device *dev)
{
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41,
!!(dev->transmitter_mask & 0x01));
ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D,
!!(dev->transmitter_mask & 0x02));
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
/* prepare transmission */
static void ene_tx_enable(struct ene_device *dev)
{
u8 conf1 = ene_read_reg(dev, ENE_CIRCFG);
u8 fwreg2 = ene_read_reg(dev, ENE_FW2);
dev->saved_conf1 = conf1;
/* Show information about currently connected transmitter jacks */
if (fwreg2 & ENE_FW2_EMMITER1_CONN)
dbg("TX: Transmitter #1 is connected");
if (fwreg2 & ENE_FW2_EMMITER2_CONN)
dbg("TX: Transmitter #2 is connected");
if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN)))
pr_warn("TX: transmitter cable isn't connected!\n");
/* disable receive on revc */
if (dev->hw_revision == ENE_HW_C)
conf1 &= ~ENE_CIRCFG_RX_EN;
/* Enable TX engine */
conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ;
ene_write_reg(dev, ENE_CIRCFG, conf1);
}
/* end transmission */
static void ene_tx_disable(struct ene_device *dev)
{
ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1);
dev->tx_buffer = NULL;
}
/* TX one sample - must be called with dev->hw_lock*/
static void ene_tx_sample(struct ene_device *dev)
{
u8 raw_tx;
u32 sample;
bool pulse = dev->tx_sample_pulse;
if (!dev->tx_buffer) {
pr_warn("TX: BUG: attempt to transmit NULL buffer\n");
return;
}
/* Grab next TX sample */
if (!dev->tx_sample) {
if (dev->tx_pos == dev->tx_len) {
if (!dev->tx_done) {
dbg("TX: no more data to send");
dev->tx_done = true;
goto exit;
} else {
dbg("TX: last sample sent by hardware");
ene_tx_disable(dev);
complete(&dev->tx_complete);
return;
}
}
sample = dev->tx_buffer[dev->tx_pos++];
dev->tx_sample_pulse = !dev->tx_sample_pulse;
dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period);
if (!dev->tx_sample)
dev->tx_sample = 1;
}
raw_tx = min(dev->tx_sample , (unsigned int)ENE_CIRRLC_OUT_MASK);
dev->tx_sample -= raw_tx;
dbg("TX: sample %8d (%s)", raw_tx * sample_period,
pulse ? "pulse" : "space");
if (pulse)
raw_tx |= ENE_CIRRLC_OUT_PULSE;
ene_write_reg(dev,
dev->tx_reg ? ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx);
dev->tx_reg = !dev->tx_reg;
exit:
/* simulate TX done interrupt */
if (txsim)
mod_timer(&dev->tx_sim_timer, jiffies + HZ / 500);
}
/* timer to simulate tx done interrupt */
static void ene_tx_irqsim(unsigned long data)
{
struct ene_device *dev = (struct ene_device *)data;
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_tx_sample(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
/* read irq status and ack it */
static int ene_irq_status(struct ene_device *dev)
{
u8 irq_status;
u8 fw_flags1, fw_flags2;
int retval = 0;
fw_flags2 = ene_read_reg(dev, ENE_FW2);
if (dev->hw_revision < ENE_HW_C) {
irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS);
if (!(irq_status & ENEB_IRQ_STATUS_IR))
return 0;
ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR);
return ENE_IRQ_RX;
}
irq_status = ene_read_reg(dev, ENE_IRQ);
if (!(irq_status & ENE_IRQ_STATUS))
return 0;
/* the original driver does this twice - a workaround? */
ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS);
/* check RX interrupt */
if (fw_flags2 & ENE_FW2_RXIRQ) {
retval |= ENE_IRQ_RX;
ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ);
}
/* check TX interrupt */
fw_flags1 = ene_read_reg(dev, ENE_FW1);
if (fw_flags1 & ENE_FW1_TXIRQ) {
ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ);
retval |= ENE_IRQ_TX;
}
return retval;
}
/* interrupt handler */
static irqreturn_t ene_isr(int irq, void *data)
{
u16 hw_value, reg;
int hw_sample, irq_status;
bool pulse;
unsigned long flags;
irqreturn_t retval = IRQ_NONE;
struct ene_device *dev = (struct ene_device *)data;
DEFINE_IR_RAW_EVENT(ev);
spin_lock_irqsave(&dev->hw_lock, flags);
dbg_verbose("ISR called");
ene_rx_read_hw_pointer(dev);
irq_status = ene_irq_status(dev);
if (!irq_status)
goto unlock;
retval = IRQ_HANDLED;
if (irq_status & ENE_IRQ_TX) {
dbg_verbose("TX interrupt");
if (!dev->hw_learning_and_tx_capable) {
dbg("TX interrupt on unsupported device!");
goto unlock;
}
ene_tx_sample(dev);
}
if (!(irq_status & ENE_IRQ_RX))
goto unlock;
dbg_verbose("RX interrupt");
if (dev->hw_learning_and_tx_capable)
ene_rx_sense_carrier(dev);
/* On hardware that doesn't support the extra buffer, we need to trust
   the interrupt and not track the read pointer */
if (!dev->hw_extra_buffer)
dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0;
while (1) {
reg = ene_rx_get_sample_reg(dev);
dbg_verbose("next sample to read at: %04x", reg);
if (!reg)
break;
hw_value = ene_read_reg(dev, reg);
if (dev->rx_fan_input_inuse) {
int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER;
/* read high part of the sample */
hw_value |= ene_read_reg(dev, reg + offset) << 8;
pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS;
/* clear space bit, and other unused bits */
hw_value &= ENE_FW_SMPL_BUF_FAN_MSK;
hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN;
} else {
pulse = !(hw_value & ENE_FW_SAMPLE_SPACE);
hw_value &= ~ENE_FW_SAMPLE_SPACE;
hw_sample = hw_value * sample_period;
if (dev->rx_period_adjust) {
hw_sample *= 100;
hw_sample /= (100 + dev->rx_period_adjust);
}
}
if (!dev->hw_extra_buffer && !hw_sample) {
dev->r_pointer = dev->w_pointer;
continue;
}
dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
ev.duration = US_TO_NS(hw_sample);
ev.pulse = pulse;
ir_raw_event_store_with_filter(dev->rdev, &ev);
}
ir_raw_event_handle(dev->rdev);
unlock:
spin_unlock_irqrestore(&dev->hw_lock, flags);
return retval;
}
/* Initialize default settings */
static void ene_setup_default_settings(struct ene_device *dev)
{
dev->tx_period = 32;
dev->tx_duty_cycle = 50; /*%*/
dev->transmitter_mask = 0x03;
dev->learning_mode_enabled = learning_mode_force;
/* Set reasonable default timeout */
dev->rdev->timeout = US_TO_NS(150000);
}
/* Upload all hardware settings at once. Used at load and resume time */
static void ene_setup_hw_settings(struct ene_device *dev)
{
if (dev->hw_learning_and_tx_capable) {
ene_tx_set_carrier(dev);
ene_tx_set_transmitters(dev);
}
ene_rx_setup(dev);
}
/* outside interface: called on first open*/
static int ene_open(struct rc_dev *rdev)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
/* outside interface: called on device close*/
static void ene_close(struct rc_dev *rdev)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
}
/* outside interface: set transmitter mask */
static int ene_set_tx_mask(struct rc_dev *rdev, u32 tx_mask)
{
struct ene_device *dev = rdev->priv;
dbg("TX: attempt to set transmitter mask %02x", tx_mask);
/* invalid txmask */
if (!tx_mask || tx_mask & ~0x03) {
dbg("TX: invalid mask");
/* return count of transmitters */
return 2;
}
dev->transmitter_mask = tx_mask;
ene_tx_set_transmitters(dev);
return 0;
}
/* outside interface : set tx carrier */
static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier)
{
struct ene_device *dev = rdev->priv;
u32 period = 2000000 / carrier;
dbg("TX: attempt to set tx carrier to %d kHz", carrier);
if (period && (period > ENE_CIRMOD_PRD_MAX ||
period < ENE_CIRMOD_PRD_MIN)) {
dbg("TX: out of range %d-%d kHz carrier",
2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX);
return -1;
}
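/*
 * Illustrative only: the 2000000 / carrier formula implies 'carrier' is
 * given in Hz and the period is counted in 500 ns steps, e.g.
 * carrier = 36000 gives period = 55, i.e. about 27.5 us (~36 kHz).
 */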
dev->tx_period = period;
ene_tx_set_carrier(dev);
return 0;
}
/*outside interface : set tx duty cycle */
static int ene_set_tx_duty_cycle(struct rc_dev *rdev, u32 duty_cycle)
{
struct ene_device *dev = rdev->priv;
dbg("TX: setting duty cycle to %d%%", duty_cycle);
dev->tx_duty_cycle = duty_cycle;
ene_tx_set_carrier(dev);
return 0;
}
/* outside interface: enable learning mode */
static int ene_set_learning_mode(struct rc_dev *rdev, int enable)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
if (enable == dev->learning_mode_enabled)
return 0;
spin_lock_irqsave(&dev->hw_lock, flags);
dev->learning_mode_enabled = enable;
ene_rx_disable(dev);
ene_rx_setup(dev);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
static int ene_set_carrier_report(struct rc_dev *rdev, int enable)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
if (enable == dev->carrier_detect_enabled)
return 0;
spin_lock_irqsave(&dev->hw_lock, flags);
dev->carrier_detect_enabled = enable;
ene_rx_disable(dev);
ene_rx_setup(dev);
ene_rx_enable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
return 0;
}
/* outside interface: enable or disable idle mode */
static void ene_set_idle(struct rc_dev *rdev, bool idle)
{
struct ene_device *dev = rdev->priv;
if (idle) {
ene_rx_reset(dev);
dbg("RX: end of data");
}
}
/* outside interface: transmit */
static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
dev->tx_buffer = buf;
dev->tx_len = n;
dev->tx_pos = 0;
dev->tx_reg = 0;
dev->tx_done = 0;
dev->tx_sample = 0;
dev->tx_sample_pulse = 0;
dbg("TX: %d samples", dev->tx_len);
spin_lock_irqsave(&dev->hw_lock, flags);
ene_tx_enable(dev);
/* Transmit first two samples */
ene_tx_sample(dev);
ene_tx_sample(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) {
dbg("TX: timeout");
spin_lock_irqsave(&dev->hw_lock, flags);
ene_tx_disable(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
} else
dbg("TX: done");
return n;
}
/* probe entry */
static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
{
int error = -ENOMEM;
struct rc_dev *rdev;
struct ene_device *dev;
/* allocate memory */
dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL);
rdev = rc_allocate_device();
if (!dev || !rdev)
goto error1;
/* validate resources */
error = -ENODEV;
/* init these to -1, as 0 is valid for both */
dev->hw_io = -1;
dev->irq = -1;
if (!pnp_port_valid(pnp_dev, 0) ||
pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
goto error;
if (!pnp_irq_valid(pnp_dev, 0))
goto error;
spin_lock_init(&dev->hw_lock);
dev->hw_io = pnp_port_start(pnp_dev, 0);
pnp_set_drvdata(pnp_dev, dev);
dev->pnp_dev = pnp_dev;
/* don't allow too short/long sample periods */
if (sample_period < 5 || sample_period > 0x7F)
sample_period = ENE_DEFAULT_SAMPLE_PERIOD;
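/* i.e. clamp to the 5..127 range; the module parameter below documents
   this as a period in microseconds with a 50 us default */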
/* detect hardware version and features */
error = ene_hw_detect(dev);
if (error)
goto error;
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
(unsigned long)dev);
pr_warn("Simulation of TX activated\n");
}
if (!dev->hw_learning_and_tx_capable)
learning_mode_force = false;
rdev->driver_type = RC_DRIVER_IR_RAW;
rdev->allowed_protos = RC_TYPE_ALL;
rdev->priv = dev;
rdev->open = ene_open;
rdev->close = ene_close;
rdev->s_idle = ene_set_idle;
rdev->driver_name = ENE_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
rdev->input_name = "ENE eHome Infrared Remote Receiver";
if (dev->hw_learning_and_tx_capable) {
rdev->s_learning_mode = ene_set_learning_mode;
init_completion(&dev->tx_complete);
rdev->tx_ir = ene_transmit;
rdev->s_tx_mask = ene_set_tx_mask;
rdev->s_tx_carrier = ene_set_tx_carrier;
rdev->s_tx_duty_cycle = ene_set_tx_duty_cycle;
rdev->s_carrier_report = ene_set_carrier_report;
rdev->input_name = "ENE eHome Infrared Remote Transceiver";
}
dev->rdev = rdev;
ene_rx_setup_hw_buffer(dev);
ene_setup_default_settings(dev);
ene_setup_hw_settings(dev);
device_set_wakeup_capable(&pnp_dev->dev, true);
device_set_wakeup_enable(&pnp_dev->dev, true);
/* claim the resources */
error = -EBUSY;
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
dev->hw_io = -1;
dev->irq = -1;
goto error;
}
dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
dev->irq = -1;
goto error;
}
error = rc_register_device(rdev);
if (error < 0)
goto error;
pr_notice("driver has been successfully loaded\n");
return 0;
error:
if (dev && dev->irq >= 0)
free_irq(dev->irq, dev);
if (dev && dev->hw_io >= 0)
release_region(dev->hw_io, ENE_IO_SIZE);
error1:
rc_free_device(rdev);
kfree(dev);
return error;
}
/* main unload function */
static void ene_remove(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
ene_rx_disable(dev);
ene_rx_restore_hw_buffer(dev);
spin_unlock_irqrestore(&dev->hw_lock, flags);
free_irq(dev->irq, dev);
release_region(dev->hw_io, ENE_IO_SIZE);
rc_unregister_device(dev->rdev);
kfree(dev);
}
/* enable wake on IR (wakes on specific button on original remote) */
static void ene_enable_wake(struct ene_device *dev, int enable)
{
enable = enable && device_may_wakeup(&dev->pnp_dev->dev);
dbg("wake on IR %s", enable ? "enabled" : "disabled");
ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable);
}
#ifdef CONFIG_PM
static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
ene_enable_wake(dev, true);
/* TODO: add support for wake pattern */
return 0;
}
static int ene_resume(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
ene_setup_hw_settings(dev);
if (dev->rx_enabled)
ene_rx_enable(dev);
ene_enable_wake(dev, false);
return 0;
}
#endif
static void ene_shutdown(struct pnp_dev *pnp_dev)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
ene_enable_wake(dev, true);
}
static const struct pnp_device_id ene_ids[] = {
{.id = "ENE0100",},
{.id = "ENE0200",},
{.id = "ENE0201",},
{.id = "ENE0202",},
{},
};
static struct pnp_driver ene_driver = {
.name = ENE_DRIVER_NAME,
.id_table = ene_ids,
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.probe = ene_probe,
.remove = __devexit_p(ene_remove),
#ifdef CONFIG_PM
.suspend = ene_suspend,
.resume = ene_resume,
#endif
.shutdown = ene_shutdown,
};
static int __init ene_init(void)
{
return pnp_register_driver(&ene_driver);
}
static void ene_exit(void)
{
pnp_unregister_driver(&ene_driver);
}
module_param(sample_period, int, S_IRUGO);
MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)");
module_param(learning_mode_force, bool, S_IRUGO);
MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
module_param(txsim, bool, S_IRUGO);
MODULE_PARM_DESC(txsim,
"Simulate TX features on unsupported hardware (dangerous)");
MODULE_DEVICE_TABLE(pnp, ene_ids);
MODULE_DESCRIPTION
("Infrared input driver for KB3926B/C/D/E/F "
"(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_LICENSE("GPL");
module_init(ene_init);
module_exit(ene_exit);
| gpl-2.0 |
bsmitty83/Kernel_Tuna_AOSP | drivers/staging/iio/adc/ad7887_ring.c | 2313 | 5027 | /*
* Copyright 2010-2011 Analog Devices Inc.
* Copyright (C) 2008 Jonathan Cameron
*
* Licensed under the GPL-2.
*
* ad7887_ring.c
*/
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include "../iio.h"
#include "../ring_generic.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../sysfs.h"
#include "ad7887.h"
int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
{
struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring;
int count = 0, ret;
u16 *ring_data;
if (!(ring->scan_mask & mask)) {
ret = -EBUSY;
goto error_ret;
}
ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
GFP_KERNEL);
if (ring_data == NULL) {
ret = -ENOMEM;
goto error_ret;
}
ret = ring->access->read_last(ring, (u8 *) ring_data);
if (ret)
goto error_free_ring_data;
/* for a single channel scan the result is stored at offset zero;
   when both channels are scanned, the CH1 sample sits at index 1 */
if ((ring->scan_mask == ((1 << 1) | (1 << 0))) && (mask == (1 << 1)))
count = 1;
ret = be16_to_cpu(ring_data[count]);
error_free_ring_data:
kfree(ring_data);
error_ret:
return ret;
}
/**
* ad7887_ring_preenable() setup the parameters of the ring before enabling
*
* Setting the number of bytes per datum is somewhat involved because this
* driver currently ensures that the timestamp is stored on an 8 byte
* boundary.
**/
static int ad7887_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7887_state *st = indio_dev->dev_data;
struct iio_ring_buffer *ring = indio_dev->ring;
st->d_size = ring->scan_count *
st->chip_info->channel[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
st->d_size += sizeof(s64);
if (st->d_size % sizeof(s64))
st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
}
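/*
 * Worked example (illustrative): two 16 bit channels give 2 * 16 / 8 = 4
 * bytes of samples; adding the 8 byte s64 timestamp gives 12, which the
 * padding above rounds up to 16 bytes per datum.
 */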
if (indio_dev->ring->access->set_bytes_per_datum)
indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
st->d_size);
switch (ring->scan_mask) {
case (1 << 0):
st->ring_msg = &st->msg[AD7887_CH0];
break;
case (1 << 1):
st->ring_msg = &st->msg[AD7887_CH1];
/* Dummy read: push CH1 setting down to hardware */
spi_sync(st->spi, st->ring_msg);
break;
case ((1 << 1) | (1 << 0)):
st->ring_msg = &st->msg[AD7887_CH0_CH1];
break;
}
return 0;
}
static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
{
struct ad7887_state *st = indio_dev->dev_data;
/* Dummy read: restore the default CH0 setting */
return spi_sync(st->spi, &st->msg[AD7887_CH0]);
}
/**
* ad7887_trigger_handler() bh of trigger launched polling to ring buffer
*
* Currently there is no option in this driver to disable the saving of
* timestamps within the ring.
**/
static irqreturn_t ad7887_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
struct ad7887_state *st = iio_dev_get_devdata(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
s64 time_ns;
__u8 *buf;
int b_sent;
unsigned int bytes = ring->scan_count *
st->chip_info->channel[0].scan_type.storagebits / 8;
buf = kzalloc(st->d_size, GFP_KERNEL);
/* an irq handler must not return -ENOMEM; fall through to the cleanup
   path instead (kfree(NULL) is a no-op and the trigger is still notified) */
if (buf == NULL)
goto done;
b_sent = spi_sync(st->spi, st->ring_msg);
if (b_sent)
goto done;
time_ns = iio_get_time_ns();
memcpy(buf, st->data, bytes);
if (ring->scan_timestamp)
memcpy(buf + st->d_size - sizeof(s64),
&time_ns, sizeof(time_ns));
indio_dev->ring->access->store_to(indio_dev->ring, buf, time_ns);
done:
kfree(buf);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
static const struct iio_ring_setup_ops ad7887_ring_setup_ops = {
.preenable = &ad7887_ring_preenable,
.postenable = &iio_triggered_ring_postenable,
.predisable = &iio_triggered_ring_predisable,
.postdisable = &ad7887_ring_postdisable,
};
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
int ret;
indio_dev->ring = iio_sw_rb_allocate(indio_dev);
if (!indio_dev->ring) {
ret = -ENOMEM;
goto error_ret;
}
/* Effectively select the ring buffer implementation */
indio_dev->ring->access = &ring_sw_access_funcs;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ad7887_trigger_handler,
IRQF_ONESHOT,
indio_dev,
"ad7887_consumer%d",
indio_dev->id);
if (indio_dev->pollfunc == NULL) {
ret = -ENOMEM;
goto error_deallocate_sw_rb;
}
/* Ring buffer functions - here trigger setup related */
indio_dev->ring->setup_ops = &ad7887_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_RING_TRIGGERED;
return 0;
error_deallocate_sw_rb:
iio_sw_rb_free(indio_dev->ring);
error_ret:
return ret;
}
void ad7887_ring_cleanup(struct iio_dev *indio_dev)
{
/* ensure that the trigger has been detached */
if (indio_dev->trig) {
iio_put_trigger(indio_dev->trig);
iio_trigger_dettach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
iio_dealloc_pollfunc(indio_dev->pollfunc);
iio_sw_rb_free(indio_dev->ring);
}
| gpl-2.0 |
Jackeagle/kernel | drivers/staging/vt6655/rxtx.c | 2313 | 112283 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: rxtx.c
*
* Purpose: handle WMAC/802.3/802.11 rx & tx functions
*
* Author: Lyndon Chen
*
* Date: May 20, 2003
*
* Functions:
* s_vGenerateTxParameter - Generate tx dma required parameter.
* vGenerateMACHeader - Translate 802.3 to 802.11 header
* cbGetFragCount - Calculate fragment number count
* csBeacon_xmit - beacon tx function
* csMgmt_xmit - management tx function
* s_cbFillTxBufHead - fulfill tx dma buffer header
* s_uGetDataDuration - get tx data required duration
* s_uFillDataHead- fulfill tx data duration header
* s_uGetRTSCTSDuration- get rtx/cts required duration
* s_uGetRTSCTSRsvTime- get rts/cts reserved time
* s_uGetTxRsvTime- get frame reserved time
* s_vFillCTSHead- fulfill CTS ctl header
* s_vFillFragParameter- Set fragment ctl parameter.
* s_vFillRTSHead- fulfill RTS ctl header
* s_vFillTxKey- fulfill tx encrypt key
* s_vSWencryption- Software encrypt header
* vDMA0_tx_80211- tx 802.11 frame via dma0
* vGenerateFIFOHeader- Generate tx FIFO ctl header
*
* Revision History:
*
*/
#include "device.h"
#include "rxtx.h"
#include "tether.h"
#include "card.h"
#include "bssdb.h"
#include "mac.h"
#include "baseband.h"
#include "michael.h"
#include "tkip.h"
#include "tcrc.h"
#include "wctl.h"
#include "wroute.h"
#include "hostap.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel = MSG_LEVEL_INFO;
#define PLICE_DEBUG
/*--------------------- Static Functions --------------------------*/
/*--------------------- Static Definitions -------------------------*/
#define CRITICAL_PACKET_LEN 256 // if packet size < 256 -> indirect send
// packet size >= 256 -> direct send
const unsigned short wTimeStampOff[2][MAX_RATE] = {
{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23}, // Long Preamble
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23}, // Short Preamble
};
const unsigned short wFB_Opt0[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_36M, RATE_48M}, // fallback_rate0
{RATE_12M, RATE_12M, RATE_18M, RATE_24M, RATE_36M}, // fallback_rate1
};
const unsigned short wFB_Opt1[2][5] = {
{RATE_12M, RATE_18M, RATE_24M, RATE_24M, RATE_36M}, // fallback_rate0
{RATE_6M , RATE_6M, RATE_12M, RATE_12M, RATE_18M}, // fallback_rate1
};
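/*
 * These tables are indexed further down (see s_uGetDataDuration and
 * s_uGetRTSCTSDuration) as wFB_OptX[FB_RATEn][wRate - RATE_18M]: the
 * column is the current OFDM rate clamped to RATE_18M..RATE_54M and the
 * row selects the first or second fallback rate.
 */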
#define RTSDUR_BB 0
#define RTSDUR_BA 1
#define RTSDUR_AA 2
#define CTSDUR_BA 3
#define RTSDUR_BA_F0 4
#define RTSDUR_AA_F0 5
#define RTSDUR_BA_F1 6
#define RTSDUR_AA_F1 7
#define CTSDUR_BA_F0 8
#define CTSDUR_BA_F1 9
#define DATADUR_B 10
#define DATADUR_A 11
#define DATADUR_A_F0 12
#define DATADUR_A_F1 13
/*--------------------- Static Functions --------------------------*/
static
void
s_vFillTxKey(
PSDevice pDevice,
unsigned char *pbyBuf,
unsigned char *pbyIVHead,
PSKeyItem pTransmitKey,
unsigned char *pbyHdrBuf,
unsigned short wPayloadLen,
unsigned char *pMICHDR
);
static
void
s_vFillRTSHead(
PSDevice pDevice,
unsigned char byPktType,
void *pvRTS,
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
PSEthernetHeader psEthHeader,
unsigned short wCurrentRate,
unsigned char byFBOption
);
static
void
s_vGenerateTxParameter(
PSDevice pDevice,
unsigned char byPktType,
void *pTxBufHead,
void *pvRrvTime,
void *pvRTS,
void *pvCTS,
unsigned int cbFrameSize,
bool bNeedACK,
unsigned int uDMAIdx,
PSEthernetHeader psEthHeader,
unsigned short wCurrentRate
);
static void s_vFillFragParameter(
PSDevice pDevice,
unsigned char *pbyBuffer,
unsigned int uTxType,
void *pvtdCurr,
unsigned short wFragType,
unsigned int cbReqCount
);
static unsigned int
s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr,
unsigned int cbFrameBodySize, unsigned int uDMAIdx, PSTxDesc pHeadTD,
PSEthernetHeader psEthHeader, unsigned char *pPacket, bool bNeedEncrypt,
PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum);
static
unsigned int
s_uFillDataHead(
PSDevice pDevice,
unsigned char byPktType,
void *pTxDataHead,
unsigned int cbFrameLength,
unsigned int uDMAIdx,
bool bNeedAck,
unsigned int uFragIdx,
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
unsigned short wCurrentRate
);
/*--------------------- Export Variables --------------------------*/
static
void
s_vFillTxKey(
PSDevice pDevice,
unsigned char *pbyBuf,
unsigned char *pbyIVHead,
PSKeyItem pTransmitKey,
unsigned char *pbyHdrBuf,
unsigned short wPayloadLen,
unsigned char *pMICHDR
)
{
unsigned long *pdwIV = (unsigned long *)pbyIVHead;
unsigned long *pdwExtIV = (unsigned long *)((unsigned char *)pbyIVHead+4);
unsigned short wValue;
PS802_11Header pMACHeader = (PS802_11Header)pbyHdrBuf;
unsigned long dwRevIVCounter;
unsigned char byKeyIndex = 0;
//Fill TXKEY
if (pTransmitKey == NULL)
return;
dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
*pdwIV = pDevice->dwIVCounter;
byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
memcpy(pDevice->abyPRNG, (unsigned char *)&(dwRevIVCounter), 3);
memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
} else {
memcpy(pbyBuf, (unsigned char *)&(dwRevIVCounter), 3);
memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
if (pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
memcpy(pbyBuf+8, (unsigned char *)&(dwRevIVCounter), 3);
memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
}
memcpy(pDevice->abyPRNG, pbyBuf, 16);
}
// Append IV after Mac Header
*pdwIV &= WEP_IV_MASK;//00000000 11111111 11111111 11111111
*pdwIV |= (unsigned long)byKeyIndex << 30;
*pdwIV = cpu_to_le32(*pdwIV);
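/*
 * Layout of the resulting 32 bit IV word (as implied by the masking
 * above): bits 0..23 hold the 24 bit IV counter, bits 30..31 hold the
 * key index; the remaining bits stay zero.
 */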
pDevice->dwIVCounter++;
if (pDevice->dwIVCounter > WEP_IV_MASK) {
pDevice->dwIVCounter = 0;
}
} else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
pTransmitKey->wTSC15_0++;
if (pTransmitKey->wTSC15_0 == 0) {
pTransmitKey->dwTSC47_16++;
}
TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
memcpy(pbyBuf, pDevice->abyPRNG, 16);
// Make IV
memcpy(pdwIV, pDevice->abyPRNG, 3);
*(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
// Append IV&ExtIV after Mac Header
*pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
} else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
pTransmitKey->wTSC15_0++;
if (pTransmitKey->wTSC15_0 == 0) {
pTransmitKey->dwTSC47_16++;
}
memcpy(pbyBuf, pTransmitKey->abyKey, 16);
// Make IV
*pdwIV = 0;
*(pbyIVHead+3) = (unsigned char)(((byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
*pdwIV |= cpu_to_le16((unsigned short)(pTransmitKey->wTSC15_0));
//Append IV&ExtIV after Mac Header
*pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
//Fill MICHDR0
*pMICHDR = 0x59;
*((unsigned char *)(pMICHDR+1)) = 0; // TxPriority
memcpy(pMICHDR+2, &(pMACHeader->abyAddr2[0]), 6);
*((unsigned char *)(pMICHDR+8)) = HIBYTE(HIWORD(pTransmitKey->dwTSC47_16));
*((unsigned char *)(pMICHDR+9)) = LOBYTE(HIWORD(pTransmitKey->dwTSC47_16));
*((unsigned char *)(pMICHDR+10)) = HIBYTE(LOWORD(pTransmitKey->dwTSC47_16));
*((unsigned char *)(pMICHDR+11)) = LOBYTE(LOWORD(pTransmitKey->dwTSC47_16));
*((unsigned char *)(pMICHDR+12)) = HIBYTE(pTransmitKey->wTSC15_0);
*((unsigned char *)(pMICHDR+13)) = LOBYTE(pTransmitKey->wTSC15_0);
*((unsigned char *)(pMICHDR+14)) = HIBYTE(wPayloadLen);
*((unsigned char *)(pMICHDR+15)) = LOBYTE(wPayloadLen);
//Fill MICHDR1
*((unsigned char *)(pMICHDR+16)) = 0; // HLEN[15:8]
if (pDevice->bLongHeader) {
*((unsigned char *)(pMICHDR+17)) = 28; // HLEN[7:0]
} else {
*((unsigned char *)(pMICHDR+17)) = 22; // HLEN[7:0]
}
wValue = cpu_to_le16(pMACHeader->wFrameCtl & 0xC78F);
memcpy(pMICHDR+18, (unsigned char *)&wValue, 2); // MSKFRACTL
memcpy(pMICHDR+20, &(pMACHeader->abyAddr1[0]), 6);
memcpy(pMICHDR+26, &(pMACHeader->abyAddr2[0]), 6);
//Fill MICHDR2
memcpy(pMICHDR+32, &(pMACHeader->abyAddr3[0]), 6);
wValue = pMACHeader->wSeqCtl;
wValue &= 0x000F;
wValue = cpu_to_le16(wValue);
memcpy(pMICHDR+38, (unsigned char *)&wValue, 2); // MSKSEQCTL
if (pDevice->bLongHeader) {
memcpy(pMICHDR+40, &(pMACHeader->abyAddr4[0]), 6);
}
}
}
static
void
s_vSWencryption(
PSDevice pDevice,
PSKeyItem pTransmitKey,
unsigned char *pbyPayloadHead,
unsigned short wPayloadSize
)
{
unsigned int cbICVlen = 4;
unsigned long dwICV = 0xFFFFFFFFL;
unsigned long *pdwICV;
if (pTransmitKey == NULL)
return;
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
//=======================================================================
// Append ICV after payload
dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
// finally, we must invert dwCRC to get the correct answer
*pdwICV = cpu_to_le32(~dwICV);
// RC4 encryption
rc4_init(&pDevice->SBox, pDevice->abyPRNG, pTransmitKey->uKeyLength + 3);
rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
//=======================================================================
} else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
//=======================================================================
//Append ICV after payload
dwICV = CRCdwGetCrc32Ex(pbyPayloadHead, wPayloadSize, dwICV);//ICV(Payload)
pdwICV = (unsigned long *)(pbyPayloadHead + wPayloadSize);
// finally, we must invert dwCRC to get the correct answer
*pdwICV = cpu_to_le32(~dwICV);
// RC4 encryption
rc4_init(&pDevice->SBox, pDevice->abyPRNG, TKIP_KEY_LEN);
rc4_encrypt(&pDevice->SBox, pbyPayloadHead, pbyPayloadHead, wPayloadSize+cbICVlen);
//=======================================================================
}
}
/* byPktType: PK_TYPE_11A  0
 *            PK_TYPE_11B  1
 *            PK_TYPE_11GB 2
 *            PK_TYPE_11GA 3
 */
static
unsigned int
s_uGetTxRsvTime(
PSDevice pDevice,
unsigned char byPktType,
unsigned int cbFrameLength,
unsigned short wRate,
bool bNeedAck
)
{
unsigned int uDataTime, uAckTime;
uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
if (byPktType == PK_TYPE_11B) {//llb,CCK mode
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopCCKBasicRate);
} else {//11g 2.4G OFDM mode & 11a 5G OFDM mode
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopOFDMBasicRate);
}
if (bNeedAck) {
return uDataTime + pDevice->uSIFS + uAckTime;
} else {
return uDataTime;
}
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
unsigned int
s_uGetRTSCTSRsvTime(
PSDevice pDevice,
unsigned char byRTSRsvType,
unsigned char byPktType,
unsigned int cbFrameLength,
unsigned short wCurrentRate
)
{
unsigned int uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
if (byRTSRsvType == 0) { //RTSTxRrvTime_bb
uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
} else if (byRTSRsvType == 1) { //RTSTxRrvTime_ba, only in 2.4GHZ
uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
} else if (byRTSRsvType == 2) { //RTSTxRrvTime_aa
uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
} else if (byRTSRsvType == 3) { //CTSTxRrvTime_ba, only in 2.4GHZ
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uRrvTime = uCTSTime + uAckTime + uDataTime + 2*pDevice->uSIFS;
return uRrvTime;
}
//RTSRrvTime
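/*
 * The full exchange is RTS + SIFS + CTS + SIFS + DATA + SIFS + ACK,
 * which is why three SIFS intervals are added below.
 */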
uRrvTime = uRTSTime + uCTSTime + uAckTime + uDataTime + 3*pDevice->uSIFS;
return uRrvTime;
}
//byFreqType 0: 5GHz, 1:2.4Ghz
static
unsigned int
s_uGetDataDuration(
PSDevice pDevice,
unsigned char byDurType,
unsigned int cbFrameLength,
unsigned char byPktType,
unsigned short wRate,
bool bNeedAck,
unsigned int uFragIdx,
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption
)
{
bool bLastFrag = 0;
unsigned int uAckTime = 0, uNextPktTime = 0;
if (uFragIdx == (uMACfragNum-1)) {
bLastFrag = 1;
}
switch (byDurType) {
case DATADUR_B: //DATADUR_B
if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
return pDevice->uSIFS + uAckTime;
} else {
return 0;
}
} else {//First Frag or Mid Frag
if (uFragIdx == (uMACfragNum-2)) {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
}
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
return pDevice->uSIFS + uAckTime + uNextPktTime;
} else {
return pDevice->uSIFS + uNextPktTime;
}
}
break;
case DATADUR_A: //DATADUR_A
if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
} else {
return 0;
}
} else {//First Frag or Mid Frag
if (uFragIdx == (uMACfragNum-2)) {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
}
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime + uNextPktTime;
} else {
return pDevice->uSIFS + uNextPktTime;
}
}
break;
case DATADUR_A_F0: //DATADUR_A_F0
if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
} else {
return 0;
}
} else { //First Frag or Mid Frag
if (byFBOption == AUTO_FB_0) {
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if (uFragIdx == (uMACfragNum-2)) {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
} else { // (byFBOption == AUTO_FB_1)
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if (uFragIdx == (uMACfragNum-2)) {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
}
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime + uNextPktTime;
} else {
return pDevice->uSIFS + uNextPktTime;
}
}
break;
case DATADUR_A_F1: //DATADUR_A_F1
if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime;
} else {
return 0;
}
} else { //First Frag or Mid Frag
if (byFBOption == AUTO_FB_0) {
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if (uFragIdx == (uMACfragNum-2)) {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
} else { // (byFBOption == AUTO_FB_1)
if (wRate < RATE_18M)
wRate = RATE_18M;
else if (wRate > RATE_54M)
wRate = RATE_54M;
if (uFragIdx == (uMACfragNum-2)) {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else {
uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
}
if (bNeedAck) {
uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
return pDevice->uSIFS + uAckTime + uNextPktTime;
} else {
return pDevice->uSIFS + uNextPktTime;
}
}
break;
default:
break;
}
ASSERT(false);
return 0;
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
unsigned int
s_uGetRTSCTSDuration(
PSDevice pDevice,
unsigned char byDurType,
unsigned int cbFrameLength,
unsigned char byPktType,
unsigned short wRate,
bool bNeedAck,
unsigned char byFBOption
)
{
unsigned int uCTSTime = 0, uDurTime = 0;
switch (byDurType) {
case RTSDUR_BB: //RTSDuration_bb
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA: //RTSDuration_ba
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_AA: //RTSDuration_aa
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case CTSDUR_BA: //CTSDuration_ba
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA_F0: //RTSDuration_ba_f0
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
break;
case RTSDUR_AA_F0: //RTSDuration_aa_f0
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
break;
case RTSDUR_BA_F1: //RTSDuration_ba_f1
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
break;
case RTSDUR_AA_F1: //RTSDuration_aa_f1
uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
break;
case CTSDUR_BA_F0: //CTSDuration_ba_f0
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
}
break;
case CTSDUR_BA_F1: //CTSDuration_ba_f1
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
} else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M)) {
uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
}
break;
default:
break;
}
return uDurTime;
}
static
unsigned int
s_uFillDataHead(
PSDevice pDevice,
unsigned char byPktType,
void *pTxDataHead,
unsigned int cbFrameLength,
unsigned int uDMAIdx,
bool bNeedAck,
unsigned int uFragIdx,
unsigned int cbLastFragmentSize,
unsigned int uMACfragNum,
unsigned char byFBOption,
unsigned short wCurrentRate
)
{
unsigned short wLen = 0x0000;
if (pTxDataHead == NULL) {
return 0;
}
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption == AUTO_FB_NONE) {
PSTxDataHead_g pBuf = (PSTxDataHead_g)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
BBvCalculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get Duration and TimeStamp
pBuf->wDuration_a = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
byPktType, wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption)); //1: 2.4GHz
pBuf->wDuration_b = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
PK_TYPE_11B, pDevice->byTopCCKBasicRate,
bNeedAck, uFragIdx, cbLastFragmentSize,
uMACfragNum, byFBOption)); //1: 2.4
pBuf->wTimeStampOff_a = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
pBuf->wTimeStampOff_b = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE]);
return pBuf->wDuration_a;
} else {
// Auto Fallback
PSTxDataHead_g_FB pBuf = (PSTxDataHead_g_FB)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
BBvCalculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get Duration and TimeStamp
pBuf->wDuration_a = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wDuration_b = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wDuration_a_f0 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wDuration_a_f1 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //1: 2.4GHz
pBuf->wTimeStampOff_a = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
pBuf->wTimeStampOff_b = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE]);
return pBuf->wDuration_a;
} //if (byFBOption == AUTO_FB_NONE)
} else if (byPktType == PK_TYPE_11A) {
if ((byFBOption != AUTO_FB_NONE)) {
// Auto Fallback
PSTxDataHead_a_FB pBuf = (PSTxDataHead_a_FB)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
pBuf->wDuration_f0 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
pBuf->wDuration_f1 = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption)); //0: 5GHz
pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
return pBuf->wDuration;
} else {
PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
return pBuf->wDuration;
}
} else {
PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration and TimeStampOff
pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
wCurrentRate, bNeedAck, uFragIdx,
cbLastFragmentSize, uMACfragNum,
byFBOption));
pBuf->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
return pBuf->wDuration;
}
return 0;
}
static
void
s_vFillRTSHead(
PSDevice pDevice,
unsigned char byPktType,
void *pvRTS,
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
PSEthernetHeader psEthHeader,
unsigned short wCurrentRate,
unsigned char byFBOption
)
{
unsigned int uRTSFrameLen = 20;
unsigned short wLen = 0x0000;
if (pvRTS == NULL)
return;
if (bDisCRC) {
// When the CRCDIS bit is on, the hardware does not generate the FCS for
// the RTS frame, so in this case we need to decrease its length by 4.
uRTSFrameLen -= 4;
}
// Note: So far the RTS header doesn't appear in the ATIM & Beacon DMA paths, so we don't need to take them into account.
// Otherwise, the code would need to be modified for them.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption == AUTO_FB_NONE) {
PSRTS_g pBuf = (PSRTS_g)pvRTS;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
pBuf->wDuration_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3: 2.4G OFDMData
pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->Data.wDurationID = pBuf->wDuration_aa;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
} else {
PSRTS_g_FB pBuf = (PSRTS_g_FB)pvRTS;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_a), (unsigned char *)&(pBuf->bySignalField_a)
);
pBuf->wTransmitLength_a = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
pBuf->wDuration_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3:2.4G OFDMData
pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDMData
pBuf->wRTSDuration_ba_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //4:wRTSDuration_ba_f0, 1:2.4G, 1:CCKData
pBuf->wRTSDuration_aa_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:wRTSDuration_aa_f0, 1:2.4G, 1:CCKData
pBuf->wRTSDuration_ba_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //6:wRTSDuration_ba_f1, 1:2.4G, 1:CCKData
pBuf->wRTSDuration_aa_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:wRTSDuration_aa_f1, 1:2.4G, 1:CCKData
pBuf->Data.wDurationID = pBuf->wDuration_aa;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
} // if (byFBOption == AUTO_FB_NONE)
} else if (byPktType == PK_TYPE_11A) {
if (byFBOption == AUTO_FB_NONE) {
PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
} else {
PSRTS_a_FB pBuf = (PSRTS_a_FB)pvRTS;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
pBuf->wRTSDuration_f0 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:RTSDuration_aa_f0, 0:5G, 0: 5G OFDMData
pBuf->wRTSDuration_f1 = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:RTSDuration_aa_f1, 0:5G, 0:
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
} else if (byPktType == PK_TYPE_11B) {
PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField), (unsigned char *)&(pBuf->bySignalField)
);
pBuf->wTransmitLength = cpu_to_le16(wLen);
//Get Duration
pBuf->wDuration = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
pBuf->Data.wDurationID = pBuf->wDuration;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
} else {
memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
}
static
void
s_vFillCTSHead(
PSDevice pDevice,
unsigned int uDMAIdx,
unsigned char byPktType,
void *pvCTS,
unsigned int cbFrameLength,
bool bNeedAck,
bool bDisCRC,
unsigned short wCurrentRate,
unsigned char byFBOption
)
{
unsigned int uCTSFrameLen = 14;
unsigned short wLen = 0x0000;
if (pvCTS == NULL) {
return;
}
if (bDisCRC) {
// When the CRCDIS bit is on, the hardware does not generate the FCS for
// the CTS frame, so in this case we need to decrease its length by 4.
uCTSFrameLen -= 4;
}
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA) {
// Auto Fallback
PSCTS_FB pBuf = (PSCTS_FB)pvCTS;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
pBuf->wDuration_ba = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wDuration_ba += pDevice->wCTSDuration;
pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
//Get CTSDuration_ba_f0
pBuf->wCTSDuration_ba_f0 = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //8:CTSDuration_ba_f0, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wCTSDuration_ba_f0 += pDevice->wCTSDuration;
pBuf->wCTSDuration_ba_f0 = cpu_to_le16(pBuf->wCTSDuration_ba_f0);
//Get CTSDuration_ba_f1
pBuf->wCTSDuration_ba_f1 = (unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //9:CTSDuration_ba_f1, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wCTSDuration_ba_f1 += pDevice->wCTSDuration;
pBuf->wCTSDuration_ba_f1 = cpu_to_le16(pBuf->wCTSDuration_ba_f1);
//Get CTS Frame body
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
} else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
PSCTS pBuf = (PSCTS)pvCTS;
//Get SignalField,ServiceField,Length
BBvCalculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
(unsigned short *)&(wLen), (unsigned char *)&(pBuf->byServiceField_b), (unsigned char *)&(pBuf->bySignalField_b)
);
pBuf->wTransmitLength_b = cpu_to_le16(wLen);
//Get CTSDuration_ba
pBuf->wDuration_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
pBuf->wDuration_ba += pDevice->wCTSDuration;
pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
//Get CTS Frame body
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
}
}
}
/*+
*
* Description:
* Generate FIFO control for MAC & Baseband controller
*
* Parameters:
* In:
* pDevice - Pointer to adapter
* byPktType - Packet type (PK_TYPE_11A/11B/11GB/11GA)
* pTxBufHead - Transmit buffer header
* pvRrvTime - Reserve time buffer
* pvRTS - RTS Buffer
* pvCTS - CTS Buffer
* cbFrameSize - Transmit Data Length (Hdr+Payload+FCS)
* bNeedACK - If need ACK
* uDMAIdx - DMA Index
* psEthHeader - Ethernet header of the outgoing frame
* wCurrentRate - Current TX rate
* Out:
* none
*
* Return Value: none
*
-*/
// unsigned int cbFrameSize,//Hdr+Payload+FCS
static
void
s_vGenerateTxParameter(
PSDevice pDevice,
unsigned char byPktType,
void *pTxBufHead,
void *pvRrvTime,
void *pvRTS,
void *pvCTS,
unsigned int cbFrameSize,
bool bNeedACK,
unsigned int uDMAIdx,
PSEthernetHeader psEthHeader,
unsigned short wCurrentRate
)
{
unsigned int cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
unsigned short wFifoCtl;
bool bDisCRC = false;
unsigned char byFBOption = AUTO_FB_NONE;
// unsigned short wCurrentRate = pDevice->wCurrentRate;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_vGenerateTxParameter...\n");
PSTxBufHead pFifoHead = (PSTxBufHead)pTxBufHead;
pFifoHead->wReserved = wCurrentRate;
wFifoCtl = pFifoHead->wFIFOCtl;
if (wFifoCtl & FIFOCTL_CRCDIS) {
bDisCRC = true;
}
if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
byFBOption = AUTO_FB_0;
} else if (wFifoCtl & FIFOCTL_AUTO_FB_1) {
byFBOption = AUTO_FB_1;
}
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
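/* the long header carries the 6 byte fourth address (see the HLEN and
   Addr4 handling in s_vFillTxKey), hence the extra 6 bytes */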
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (pvRTS != NULL) { //RTS_need
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_gRTS pBuf = (PSRrvTime_gRTS)pvRrvTime;
pBuf->wRTSTxRrvTime_aa = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 1:2.4GHz
pBuf->wRTSTxRrvTime_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 1, byPktType, cbFrameSize, wCurrentRate));//1:RTSTxRrvTime_ba, 1:2.4GHz
pBuf->wRTSTxRrvTime_bb = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
pBuf->wTxRrvTime_a = cpu_to_le16((unsigned short) s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
pBuf->wTxRrvTime_b = cpu_to_le16((unsigned short) s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
} else {//RTS_needless, PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_gCTS pBuf = (PSRrvTime_gCTS)pvRrvTime;
pBuf->wTxRrvTime_a = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
pBuf->wTxRrvTime_b = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
pBuf->wCTSTxRrvTime_ba = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 3, byPktType, cbFrameSize, wCurrentRate));//3:CTSTxRrvTime_Ba, 1:2.4GHz
}
//Fill CTS
s_vFillCTSHead(pDevice, uDMAIdx, byPktType, pvCTS, cbFrameSize, bNeedACK, bDisCRC, wCurrentRate, byFBOption);
}
} else if (byPktType == PK_TYPE_11A) {
if (pvRTS != NULL) {//RTS_need, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wRTSTxRrvTime = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 0:5GHz
pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//0:OFDM
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
} else {//RTS_needless, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK)); //0:OFDM
}
}
} else if (byPktType == PK_TYPE_11B) {
if ((pvRTS != NULL)) {//RTS_need, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wRTSTxRrvTime = cpu_to_le16((unsigned short)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK));//1:CCK
}
//Fill RTS
s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
} else { //RTS_needless, non PCF mode
//Fill RsvTime
if (pvRrvTime) {
PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
pBuf->wTxRrvTime = cpu_to_le16((unsigned short)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK)); //1:CCK
}
}
}
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_vGenerateTxParameter END.\n");
}
/*
unsigned char *pbyBuffer,//point to pTxBufHead
unsigned short wFragType,//00:Non-Frag, 01:Start, 02:Mid, 03:Last
unsigned int cbFragmentSize,//Hdr+payload+FCS
*/
static
void
s_vFillFragParameter(
PSDevice pDevice,
unsigned char *pbyBuffer,
unsigned int uTxType,
void *pvtdCurr,
unsigned short wFragType,
unsigned int cbReqCount
)
{
PSTxBufHead pTxBufHead = (PSTxBufHead) pbyBuffer;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_vFillFragParameter...\n");
if (uTxType == TYPE_SYNCDMA) {
//PSTxSyncDesc ptdCurr = (PSTxSyncDesc)s_pvGetTxDescHead(pDevice, uTxType, uCurIdx);
PSTxSyncDesc ptdCurr = (PSTxSyncDesc)pvtdCurr;
//Set FIFOCtl & TimeStamp in TxSyncDesc
ptdCurr->m_wFIFOCtl = pTxBufHead->wFIFOCtl;
ptdCurr->m_wTimeStamp = pTxBufHead->wTimeStamp;
//Set TSR1 & ReqCount in TxDescHead
ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
} else {
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
}
} else {
//PSTxDesc ptdCurr = (PSTxDesc)s_pvGetTxDescHead(pDevice, uTxType, uCurIdx);
PSTxDesc ptdCurr = (PSTxDesc)pvtdCurr;
//Set TSR1 & ReqCount in TxDescHead
ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
if (wFragType == FRAGCTL_ENDFRAG) { //Last Fragmentation
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
} else {
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP);
}
}
pTxBufHead->wFragCtl |= (unsigned short)wFragType;//0x0001; //0000 0000 0000 0001
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_vFillFragParameter END\n");
}
static unsigned int
s_cbFillTxBufHead(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr,
unsigned int cbFrameBodySize, unsigned int uDMAIdx, PSTxDesc pHeadTD,
PSEthernetHeader psEthHeader, unsigned char *pPacket, bool bNeedEncrypt,
PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum)
{
unsigned int cbMACHdLen;
unsigned int cbFrameSize;
unsigned int cbFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
unsigned int cbFragPayloadSize;
unsigned int cbLastFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
unsigned int cbLastFragPayloadSize;
unsigned int uFragIdx;
unsigned char *pbyPayloadHead;
unsigned char *pbyIVHead;
unsigned char *pbyMacHdr;
unsigned short wFragType; //00:Non-Frag, 01:Start, 10:Mid, 11:Last
unsigned int uDuration;
unsigned char *pbyBuffer;
// unsigned int uKeyEntryIdx = NUM_KEY_ENTRY+1;
// unsigned char byKeySel = 0xFF;
unsigned int cbIVlen = 0;
unsigned int cbICVlen = 0;
unsigned int cbMIClen = 0;
unsigned int cbFCSlen = 4;
unsigned int cb802_1_H_len = 0;
unsigned int uLength = 0;
unsigned int uTmpLen = 0;
// unsigned char abyTmp[8];
// unsigned long dwCRC;
unsigned int cbMICHDR = 0;
unsigned long dwMICKey0, dwMICKey1;
unsigned long dwMIC_Priority;
unsigned long *pdwMIC_L;
unsigned long *pdwMIC_R;
unsigned long dwSafeMIC_L, dwSafeMIC_R; //Fix "Last Frag Size" < "MIC length".
bool bMIC2Frag = false;
unsigned int uMICFragLen = 0;
unsigned int uMACfragNum = 1;
unsigned int uPadding = 0;
unsigned int cbReqCount = 0;
bool bNeedACK;
bool bRTS;
bool bIsAdhoc;
unsigned char *pbyType;
PSTxDesc ptdCurr;
PSTxBufHead psTxBufHd = (PSTxBufHead) pbyTxBufferAddr;
// unsigned int tmpDescIdx;
unsigned int cbHeaderLength = 0;
void *pvRrvTime;
PSMICHDRHead pMICHDR;
void *pvRTS;
void *pvCTS;
void *pvTxDataHd;
unsigned short wTxBufSize; // FFinfo size
unsigned int uTotalCopyLength = 0;
unsigned char byFBOption = AUTO_FB_NONE;
bool bIsWEP256 = false;
PSMgmtObject pMgmt = pDevice->pMgmt;
pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "s_cbFillTxBufHead...\n");
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
bNeedACK = false;
else
bNeedACK = true;
bIsAdhoc = true;
} else {
// MSDUs in Infra mode always need ACK
bNeedACK = true;
bIsAdhoc = false;
}
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
else
cbMACHdLen = WLAN_HDR_ADDR3_LEN;
if ((bNeedEncrypt == true) && (pTransmitKey != NULL)) {
if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
cbIVlen = 4;
cbICVlen = 4;
if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
bIsWEP256 = true;
}
}
if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
}
if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
cbMICHDR = sizeof(SMICHDRHead);
}
if (pDevice->byLocalID > REV_ID_VT3253_A1) {
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMACHdLen%4);
uPadding %= 4;
}
}
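// On-air frame length: 802.11 header + IV/ExtIV + (frame body + MIC) + ICV + FCS.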
cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
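// RTS/CTS protection is skipped when no ACK is expected, when the frame is below the
// RTS threshold, or when the frame will be fragmented anyway (fragmentation threshold
// at or below the RTS threshold); otherwise RTS and long-retry accounting are enabled.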
if ((bNeedACK == false) ||
(cbFrameSize < pDevice->wRTSThreshold) ||
((cbFrameSize >= pDevice->wFragmentationThreshold) && (pDevice->wFragmentationThreshold <= pDevice->wRTSThreshold))
) {
bRTS = false;
} else {
bRTS = true;
psTxBufHd->wFIFOCtl |= (FIFOCTL_RTS | FIFOCTL_LRETRY);
}
//
// Use for AUTO FALL BACK
//
if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_0) {
byFBOption = AUTO_FB_0;
} else if (psTxBufHd->wFIFOCtl & FIFOCTL_AUTO_FB_1) {
byFBOption = AUTO_FB_1;
}
//////////////////////////////////////////////////////
//Set RrvTime/RTS/CTS Buffer
wTxBufSize = sizeof(STxBufHead);
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g) + sizeof(STxDataHead_g);
} else { //RTS_needless
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvRTS = NULL;
pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
pvRTS = (PSRTS_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB) + sizeof(STxDataHead_g_FB);
} else { //RTS_needless
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvRTS = NULL;
pvCTS = (PSCTS_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB) + sizeof(STxDataHead_g_FB);
}
} // Auto Fall Back
} else {//802.11a/b packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab) + sizeof(STxDataHead_ab);
} else { //RTS_needless, need MICHDR
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = (PSRTS_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB));
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB) + sizeof(STxDataHead_a_FB);
} else { //RTS_needless
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_a_FB);
}
} // Auto Fall Back
}
memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
//////////////////////////////////////////////////////////////////
if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
if (pDevice->pMgmt->eAuthenMode == WMAC_AUTH_WPANONE) {
dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
} else if ((pTransmitKey->dwKeyIndex & AUTHENTICATOR_KEY) != 0) {
dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
} else {
dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[24]);
dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[28]);
}
// DO Software Michael
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((unsigned char *)&(psEthHeader->abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
}
///////////////////////////////////////////////////////////////////
pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderLength);
pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding + cbIVlen);
pbyIVHead = (unsigned char *)(pbyMacHdr + cbMACHdLen + uPadding);
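// Tx buffer layout from here on: [TxBufHead][RrvTime][MICHDR?][RTS/CTS?][TxDataHead]
// (= cbHeaderLength bytes), then the 802.11 MAC header, DWORD-alignment padding,
// the IV/ExtIV/RSN header, and finally the payload.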
if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true) && (bIsWEP256 == false)) {
// Fragmentation
// FragThreshold = Fragment size(Hdr+(IV)+fragment payload+(MIC)+(ICV)+FCS)
cbFragmentSize = pDevice->wFragmentationThreshold;
cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
//FragNum = (FrameSize - (Hdr + FCS)) / (FragmentSize - (Hdr + FCS))
uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
if (cbLastFragPayloadSize == 0) {
cbLastFragPayloadSize = cbFragPayloadSize;
} else {
uMACfragNum++;
}
//[Hdr+(IV)+last fragment payload+(MIC)+(ICV)+FCS]
cbLastFragmentSize = cbMACHdLen + cbLastFragPayloadSize + cbIVlen + cbICVlen + cbFCSlen;
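// Illustrative example (values not taken from the source): with a 2312-byte
// (body + MIC), a 1024-byte fragmentation threshold, a 24-byte MAC header, 8-byte IV,
// 4-byte ICV and 4-byte FCS, each full fragment carries 1024 - 24 - 8 - 4 - 4 = 984
// payload bytes; 2312 = 2 * 984 + 344, so uMACfragNum becomes 3 and the last
// fragment carries 344 payload bytes.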
for (uFragIdx = 0; uFragIdx < uMACfragNum; uFragIdx++) {
if (uFragIdx == 0) {
//=========================
// Start Fragmentation
//=========================
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Start Fragmentation...\n");
wFragType = FRAGCTL_STAFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
if (bNeedEncrypt == true) {
//Fill TXKEY
s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
//Fill IV(ExtIV,RSNHDR)
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
// 802.1H
if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
} else {
memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
}
pbyType = (unsigned char *)(pbyPayloadHead + 6);
memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
cb802_1_H_len = 8;
}
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
//---------------------------
// S/W or H/W Encryption
//---------------------------
pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength), (pPacket + 14), (cbFragPayloadSize - cb802_1_H_len));
uTotalCopyLength += cbFragPayloadSize - cb802_1_H_len;
if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Start MIC: %d\n", cbFragPayloadSize);
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFragPayloadSize);
}
//---------------------------
// S/W Encryption
//---------------------------
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len), (unsigned short)cbFragPayloadSize);
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
//--------------------
//1.Set TSR1 & ReqCount in TxDescHead
//2.Set FragCtl in TxBufferHead
//3.Set Frame Control
//4.Set Sequence Control
//5.Get S/W generate FCS
//--------------------
s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
pDevice->iTDUsed[uDMAIdx]++;
pHeadTD = ptdCurr->next;
} else if (uFragIdx == (uMACfragNum-1)) {
//=========================
// Last Fragmentation
//=========================
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last Fragmentation...\n");
//tmpDescIdx = (uDescIdx + uFragIdx) % pDevice->cbTD[uDMAIdx];
wFragType = FRAGCTL_ENDFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbLastFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbLastFragmentSize, uDMAIdx, bNeedACK,
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
if (bNeedEncrypt == true) {
//Fill TXKEY
s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (unsigned short)cbLastFragPayloadSize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbLastFragPayloadSize;
//---------------------------
// S/W or H/W Encryption
//---------------------------
pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
//pbyBuffer = (unsigned char *)pDevice->aamTxBuf[uDMAIdx][tmpDescIdx].pbyVAddr;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
if (bMIC2Frag == false) {
memcpy((pbyBuffer + uLength),
(pPacket + 14 + uTotalCopyLength),
(cbLastFragPayloadSize - cbMIClen)
);
//TODO check uTmpLen !
uTmpLen = cbLastFragPayloadSize - cbMIClen;
}
if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "LAST: uMICFragLen:%d, cbLastFragPayloadSize:%d, uTmpLen:%d\n",
uMICFragLen, cbLastFragPayloadSize, uTmpLen);
if (bMIC2Frag == false) {
if (uTmpLen != 0)
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Last MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
} else {
if (uMICFragLen >= 4) {
memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
(cbMIClen - uMICFragLen));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "LAST: uMICFragLen >= 4: %X, %d\n",
*(unsigned char *)((unsigned char *)&dwSafeMIC_R + (uMICFragLen - 4)),
(cbMIClen - uMICFragLen));
} else {
memcpy((pbyBuffer + uLength), ((unsigned char *)&dwSafeMIC_L + uMICFragLen),
(4 - uMICFragLen));
memcpy((pbyBuffer + uLength + (4 - uMICFragLen)), &dwSafeMIC_R, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "LAST: uMICFragLen < 4: %X, %d\n",
*(unsigned char *)((unsigned char *)&dwSafeMIC_R + uMICFragLen - 4),
(cbMIClen - uMICFragLen));
}
/*
for (ii = 0; ii < cbLastFragPayloadSize + 8 + 24; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii - 8 - 24)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "\n\n");
*/
}
MIC_vUnInit();
} else {
ASSERT(uTmpLen == (cbLastFragPayloadSize - cbMIClen));
}
//---------------------------
// S/W Encryption
//---------------------------
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbLastFragPayloadSize);
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
//--------------------
//1.Set TSR1 & ReqCount in TxDescHead
//2.Set FragCtl in TxBufferHead
//3.Set Frame Control
//4.Set Sequence Control
//5.Get S/W generate FCS
//--------------------
s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
pDevice->iTDUsed[uDMAIdx]++;
pHeadTD = ptdCurr->next;
} else {
//=========================
// Middle Fragmentation
//=========================
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Middle Fragmentation...\n");
//tmpDescIdx = (uDescIdx + uFragIdx) % pDevice->cbTD[uDMAIdx];
wFragType = FRAGCTL_MIDFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, uFragIdx);
if (bNeedEncrypt == true) {
//Fill TXKEY
s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (unsigned short)cbFragPayloadSize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cbFragPayloadSize;
//---------------------------
// S/W or H/W Encryption
//---------------------------
pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength),
(pPacket + 14 + uTotalCopyLength),
cbFragPayloadSize
);
uTmpLen = cbFragPayloadSize;
uTotalCopyLength += uTmpLen;
if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
MIC_vAppend((pbyBuffer + uLength), uTmpLen);
if (uTmpLen < cbFragPayloadSize) {
bMIC2Frag = true;
uMICFragLen = cbFragPayloadSize - uTmpLen;
ASSERT(uMICFragLen < cbMIClen);
pdwMIC_L = (unsigned long *)(pbyBuffer + uLength + uTmpLen);
pdwMIC_R = (unsigned long *)(pbyBuffer + uLength + uTmpLen + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
dwSafeMIC_L = *pdwMIC_L;
dwSafeMIC_R = *pdwMIC_R;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIDDLE: uMICFragLen:%d, cbFragPayloadSize:%d, uTmpLen:%d\n",
uMICFragLen, cbFragPayloadSize, uTmpLen);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fill MIC in Middle frag [%d]\n", uMICFragLen);
/*
for (ii = 0; ii < uMICFragLen; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", *((unsigned char *)((pbyBuffer + uLength + uTmpLen) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "\n");
*/
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get MIC:%lX, %lX\n", *pdwMIC_L, *pdwMIC_R);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Middle frag len: %d\n", uTmpLen);
/*
for (ii = 0; ii < uTmpLen; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "\n\n");
*/
} else {
ASSERT(uTmpLen == (cbFragPayloadSize));
}
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength), (unsigned short)cbFragPayloadSize);
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
//--------------------
//1.Set TSR1 & ReqCount in TxDescHead
//2.Set FragCtl in TxBufferHead
//3.Set Frame Control
//4.Set Sequence Control
//5.Get S/W generate FCS
//--------------------
s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
pDevice->iTDUsed[uDMAIdx]++;
pHeadTD = ptdCurr->next;
}
} // for (uMACfragNum)
} else {
//=========================
// No Fragmentation
//=========================
//DBG_PRTGRP03(("No Fragmentation...\n"));
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "No Fragmentation...\n");
wFragType = FRAGCTL_NONFRAG;
//Set FragCtl in TxBufferHead
psTxBufHd->wFragCtl |= (unsigned short)wFragType;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
0, 0, uMACfragNum, byFBOption, pDevice->wCurrentRate);
// Generate TX MAC Header
vGenerateMACHeader(pDevice, pbyMacHdr, (unsigned short)uDuration, psEthHeader, bNeedEncrypt,
wFragType, uDMAIdx, 0);
if (bNeedEncrypt == true) {
//Fill TXKEY
s_vFillTxKey(pDevice, (unsigned char *)(psTxBufHd->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
}
// 802.1H
if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
} else {
memcpy((unsigned char *)(pbyPayloadHead), &pDevice->abySNAP_RFC1042[0], 6);
}
pbyType = (unsigned char *)(pbyPayloadHead + 6);
memcpy(pbyType, &(psEthHeader->wType), sizeof(unsigned short));
cb802_1_H_len = 8;
}
cbReqCount = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen);
//---------------------------
// S/W or H/W Encryption
//---------------------------
pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
//copy TxBufferHeader + MacHeader to desc
memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength),
(pPacket + 14),
cbFrameBodySize - cb802_1_H_len
);
if ((bNeedEncrypt == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Length:%d, %d\n", cbFrameBodySize - cb802_1_H_len, uLength);
/*
for (ii = 0; ii < (cbFrameBodySize - cb802_1_H_len); ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", *((unsigned char *)((pbyBuffer + uLength) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "\n");
*/
MIC_vAppend((pbyBuffer + uLength - cb802_1_H_len), cbFrameBodySize);
pdwMIC_L = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize);
pdwMIC_R = (unsigned long *)(pbyBuffer + uLength - cb802_1_H_len + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
if (pDevice->bTxMICFail == true) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
pDevice->bTxMICFail = false;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderLength, uPadding, cbIVlen);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
/*
for (ii = 0; ii < 8; ii++) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", *(((unsigned char *)(pdwMIC_L) + ii)));
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "\n");
*/
}
if ((pDevice->byLocalID <= REV_ID_VT3253_A1)) {
if (bNeedEncrypt) {
s_vSWencryption(pDevice, pTransmitKey, (pbyBuffer + uLength - cb802_1_H_len),
(unsigned short)(cbFrameBodySize + cbMIClen));
cbReqCount += cbICVlen;
}
}
ptdCurr = (PSTxDesc)pHeadTD;
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
//Set TSR1 & ReqCount in TxDescHead
ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
pDevice->iTDUsed[uDMAIdx]++;
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " ptdCurr->m_dwReserved0[%d] ptdCurr->m_dwReserved1[%d].\n", ptdCurr->pTDInfo->dwReqCount, ptdCurr->pTDInfo->dwHeaderLength);
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " cbHeaderLength[%d]\n", cbHeaderLength);
}
*puMACfragNum = uMACfragNum;
//DBG_PRTGRP03(("s_cbFillTxBufHead END\n"));
return cbHeaderLength;
}
void
vGenerateFIFOHeader(PSDevice pDevice, unsigned char byPktType, unsigned char *pbyTxBufferAddr,
bool bNeedEncrypt, unsigned int cbPayloadSize, unsigned int uDMAIdx,
PSTxDesc pHeadTD, PSEthernetHeader psEthHeader, unsigned char *pPacket,
PSKeyItem pTransmitKey, unsigned int uNodeIndex, unsigned int *puMACfragNum,
unsigned int *pcbHeaderSize)
{
unsigned int wTxBufSize; // FFinfo size
bool bNeedACK;
bool bIsAdhoc;
unsigned short cbMacHdLen;
PSTxBufHead pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
memset(pTxBufHead, 0, wTxBufSize);
//Set FIFOCTL_NEEDACK
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0]))) {
bNeedACK = false;
pTxBufHead->wFIFOCtl = pTxBufHead->wFIFOCtl & (~FIFOCTL_NEEDACK);
} else {
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
bIsAdhoc = true;
} else {
// MSDUs in Infra mode always need ACK
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
bIsAdhoc = false;
}
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
//Set FIFOCTL_LHEAD
if (pDevice->bLongHeader)
pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
//Set FIFOCTL_GENINT
pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT;
//Set FIFOCTL_ISDMA0
if (TYPE_TXDMA0 == uDMAIdx) {
pTxBufHead->wFIFOCtl |= FIFOCTL_ISDMA0;
}
//Set FRAGCTL_MACHDCNT
if (pDevice->bLongHeader) {
cbMacHdLen = WLAN_HDR_ADDR3_LEN + 6;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
//Set packet type
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
;
} else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
} else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
} else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
}
//Set FIFOCTL_GrpAckPolicy
if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//Set Auto Fallback Ctl
if (pDevice->wCurrentRate >= RATE_18M) {
if (pDevice->byAutoFBCtrl == AUTO_FB_0) {
pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_0;
} else if (pDevice->byAutoFBCtrl == AUTO_FB_1) {
pTxBufHead->wFIFOCtl |= FIFOCTL_AUTO_FB_1;
}
}
//Set FRAGCTL_WEPTYP
pDevice->bAES = false;
//Set FRAGCTL_WEPTYP
if (pDevice->byLocalID > REV_ID_VT3253_A1) {
if ((bNeedEncrypt) && (pTransmitKey != NULL)) { //WEP enabled
if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
} else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) { //WEP40 or WEP104
if (pTransmitKey->uKeyLength != WLAN_WEP232_KEYLEN)
pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
} else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { //CCMP
pTxBufHead->wFragCtl |= FRAGCTL_AES;
}
}
}
#ifdef PLICE_DEBUG
RFbSetPower(pDevice, pDevice->wCurrentRate, pDevice->byCurrentCh);
#endif
pTxBufHead->byTxPower = pDevice->byCurPwr;
/*
if (pDevice->bEnableHostWEP)
pTxBufHead->wFragCtl &= ~(FRAGCTL_TKIP | FRAGCTL_LEGACY |FRAGCTL_AES);
*/
*pcbHeaderSize = s_cbFillTxBufHead(pDevice, byPktType, pbyTxBufferAddr, cbPayloadSize,
uDMAIdx, pHeadTD, psEthHeader, pPacket, bNeedEncrypt,
pTransmitKey, uNodeIndex, puMACfragNum);
return;
}
/*+
*
* Description:
* Translate the 802.3 header into an 802.11 MAC header
*
* Parameters:
* In:
* pDevice - Pointer to adapter
* pbyBufferAddr - Buffer that receives the generated 802.11 header
* wDuration - Value for the Duration/ID field
* psEthHeader - Pointer to the 802.3 Ethernet header
* bNeedEncrypt - Whether the frame will be encrypted (sets the WEP bit)
* wFragType - Fragment type (Non-Frag/Start/Mid/Last)
* uDMAIdx - DMA index (the ATIM ring produces an ATIM frame type)
* uFragIdx - Fragment index, written into the Sequence Control field
* Out:
* none
*
* Return Value: none
*
-*/
void
vGenerateMACHeader(
PSDevice pDevice,
unsigned char *pbyBufferAddr,
unsigned short wDuration,
PSEthernetHeader psEthHeader,
bool bNeedEncrypt,
unsigned short wFragType,
unsigned int uDMAIdx,
unsigned int uFragIdx
)
{
PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
memset(pMACHeader, 0, (sizeof(S802_11Header))); //- sizeof(pMACHeader->dwIV)));
if (uDMAIdx == TYPE_ATIMDMA) {
pMACHeader->wFrameCtl = TYPE_802_11_ATIM;
} else {
pMACHeader->wFrameCtl = TYPE_802_11_DATA;
}
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
pMACHeader->wFrameCtl |= FC_FROMDS;
} else {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
} else {
memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
pMACHeader->wFrameCtl |= FC_TODS;
}
}
if (bNeedEncrypt)
pMACHeader->wFrameCtl |= cpu_to_le16((unsigned short)WLAN_SET_FC_ISWEP(1));
pMACHeader->wDurationID = cpu_to_le16(wDuration);
if (pDevice->bLongHeader) {
PWLAN_80211HDR_A4 pMACA4Header = (PWLAN_80211HDR_A4) pbyBufferAddr;
pMACHeader->wFrameCtl |= (FC_TODS | FC_FROMDS);
memcpy(pMACA4Header->abyAddr4, pDevice->abyBSSID, WLAN_ADDR_LEN);
}
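// Sequence Control: the sequence number occupies bits 4..15 and the fragment number
// bits 0..3, per the 802.11 Sequence Control field layout.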
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
//Set FragNumber in Sequence Control
pMACHeader->wSeqCtl |= cpu_to_le16((unsigned short)uFragIdx);
if ((wFragType == FRAGCTL_ENDFRAG) || (wFragType == FRAGCTL_NONFRAG)) {
pDevice->wSeqCounter++;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
}
if ((wFragType == FRAGCTL_STAFRAG) || (wFragType == FRAGCTL_MIDFRAG)) { //StartFrag or MidFrag
pMACHeader->wFrameCtl |= FC_MOREFRAG;
}
}
CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
PSTxDesc pFrstTD;
unsigned char byPktType;
unsigned char *pbyTxBufferAddr;
void *pvRTS;
PSCTS pCTS;
void *pvTxDataHd;
unsigned int uDuration;
unsigned int cbReqCount;
PS802_11Header pMACHeader;
unsigned int cbHeaderSize;
unsigned int cbFrameBodySize;
bool bNeedACK;
bool bIsPSPOLL = false;
PSTxBufHead pTxBufHead;
unsigned int cbFrameSize;
unsigned int cbIVlen = 0;
unsigned int cbICVlen = 0;
unsigned int cbMIClen = 0;
unsigned int cbFCSlen = 4;
unsigned int uPadding = 0;
unsigned short wTxBufSize;
unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
void *pvRrvTime;
void *pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned short wCurrentRate = RATE_1M;
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
return CMD_STATUS_RESOURCES;
}
pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
cbFrameBodySize = pPacket->cbPayloadLen;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
wCurrentRate = RATE_6M;
byPktType = PK_TYPE_11A;
} else {
wCurrentRate = RATE_1M;
byPktType = PK_TYPE_11B;
}
// SetPower will cause an incorrect power TX state for OFDM data packets in the TX buffer.
// 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability.
// And the cmd timer will wait for data pkt TX to finish before scanning, so it's OK
// to set power here.
if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) {
RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
} else {
RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
}
pTxBufHead->byTxPower = pDevice->byCurPwr;
//+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
if (pDevice->byFOETuning) {
if ((pPacket->p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
wCurrentRate = RATE_24M;
byPktType = PK_TYPE_11GA;
}
}
//Set packet type
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
pTxBufHead->wFIFOCtl = 0;
} else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
} else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
} else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
}
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
if (is_multicast_ether_addr(&(pPacket->p80211Header->sA3.abyAddr1[0])))
bNeedACK = false;
else {
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
}
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((pPacket->p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
bIsPSPOLL = true;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
//Set FRAGCTL_MACHDCNT
pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)(cbMacHdLen << 10));
// Notes:
// Although the spec says an MMPDU can be fragmented, in practice
// MMPDUs are rarely fragmented; they may still be sent with RTS protection.
pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
cbIVlen = 4;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
//We need to get seed here for filling TxKey entry.
//TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
// pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
pTxBufHead->wFragCtl |= FRAGCTL_AES;
pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
uPadding %= 4;
}
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen;
//Set FIFOCTL_GrpAckPolicy
if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
//Set RrvTime/RTS/CTS Buffer
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
pvRTS = NULL;
pCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS));
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS) + sizeof(STxDataHead_g);
} else { // 802.11a/b packet
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
pvRTS = NULL;
pCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + sizeof(STxDataHead_ab);
}
memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
//=========================
// No Fragmentation
//=========================
pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pCTS,
cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
0, 0, 1, AUTO_FB_NONE, wCurrentRate);
pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + cbFrameBodySize;
if (WLAN_GET_FC_ISWEP(pPacket->p80211Header->sA4.wFrameCtl) != 0) {
unsigned char *pbyIVHead;
unsigned char *pbyPayloadHead;
unsigned char *pbyBSSID;
PSKeyItem pTransmitKey = NULL;
pbyIVHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding);
pbyPayloadHead = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize + cbMacHdLen + uPadding + cbIVlen);
//Fill TXKEY
//Kyle: Need fix: TKIP and AES didn't encrypt Mgmt Packets.
//s_vFillTxKey(pDevice, (unsigned char *)pTxBufHead->adwTxKey, NULL);
//Fill IV(ExtIV,RSNHDR)
//s_vFillPrePayload(pDevice, pbyIVHead, NULL);
//---------------------------
// S/W or H/W Encryption
//---------------------------
do {
if ((pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) &&
(pDevice->bLinkPass == true)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
// get group key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get GTK.\n");
break;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get PTK.\n");
break;
}
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KEY is NULL. OP Mode[%d]\n", pDevice->eOPMode);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Get GTK.\n");
}
} while (false);
//Fill TXKEY
s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
(unsigned char *)pMACHeader, (unsigned short)cbFrameBodySize, NULL);
memcpy(pMACHeader, pPacket->p80211Header, cbMacHdLen);
memcpy(pbyPayloadHead, ((unsigned char *)(pPacket->p80211Header) + cbMacHdLen),
cbFrameBodySize);
} else {
// Copy the Packet into a tx Buffer
memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
}
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
pDevice->wSeqCounter++;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
if (bIsPSPOLL) {
// The MAC will automatically replace the Duration field of the MAC header with the
// Duration field of the FIFO control header.
// This would corrupt the AID field of a PS-POLL packet (because the PS-POLL AID field
// occupies the same place as other packets' Duration field),
// which can cause a Cisco AP to issue a Disassociation packet.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
} else {
((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
}
}
// first TD is the only TD
//Set TSR1 & ReqCount in TxDescHead
pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
pFrstTD->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
pFrstTD->pTDInfo->byFlags = 0;
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = false;
wmb();
pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
wmb();
pDevice->iTDUsed[TYPE_TXDMA0]++;
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " available td0 <= 1\n");
}
pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
#ifdef TxInSleep
pDevice->nTxDataTimeCout = 0; //2008-8-21 chester <add> for sending null packets
#endif
// Poll Transmit the adapter
MACvTransmit0(pDevice->PortOffset);
return CMD_STATUS_PENDING;
}
CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
unsigned char byPktType;
unsigned char *pbyBuffer = (unsigned char *)pDevice->tx_beacon_bufs;
unsigned int cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
unsigned int cbHeaderSize = 0;
unsigned short wTxBufSize = sizeof(STxShortBufHead);
PSTxShortBufHead pTxBufHead = (PSTxShortBufHead) pbyBuffer;
PSTxDataHead_ab pTxDataHead = (PSTxDataHead_ab) (pbyBuffer + wTxBufSize);
PS802_11Header pMACHeader;
unsigned short wCurrentRate;
unsigned short wLen = 0x0000;
memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
wCurrentRate = RATE_6M;
byPktType = PK_TYPE_11A;
} else {
wCurrentRate = RATE_2M;
byPktType = PK_TYPE_11B;
}
//Set Preamble type always long
pDevice->byPreambleType = PREAMBLE_LONG;
//Set FIFOCTL_GENINT
pTxBufHead->wFIFOCtl |= FIFOCTL_GENINT;
//Set packet type & Get Duration
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
pTxDataHead->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, byPktType,
wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
} else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
pTxDataHead->wDuration = cpu_to_le16((unsigned short)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, byPktType,
wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
}
BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, byPktType,
(unsigned short *)&(wLen), (unsigned char *)&(pTxDataHead->byServiceField), (unsigned char *)&(pTxDataHead->bySignalField)
);
pTxDataHead->wTransmitLength = cpu_to_le16(wLen);
//Get TimeStampOff
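// wTimeStampOff is looked up by preamble type (long/short) and the current rate.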
pTxDataHead->wTimeStampOff = cpu_to_le16(wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE]);
cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
//Generate Beacon Header
pMACHeader = (PS802_11Header)(pbyBuffer + cbHeaderSize);
memcpy(pMACHeader, pPacket->p80211Header, pPacket->cbMPDULen);
pMACHeader->wDurationID = 0;
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
pDevice->wSeqCounter++;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
// Set Beacon buffer length
pDevice->wBCNBufLen = pPacket->cbMPDULen + cbHeaderSize;
MACvSetCurrBCNTxDescAddr(pDevice->PortOffset, (pDevice->tx_beacon_dma));
MACvSetCurrBCNLength(pDevice->PortOffset, pDevice->wBCNBufLen);
// Set auto Transmit on
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
// Poll Transmit the adapter
MACvTransmitBCN(pDevice->PortOffset);
return CMD_STATUS_PENDING;
}
unsigned int
cbGetFragCount(
PSDevice pDevice,
PSKeyItem pTransmitKey,
unsigned int cbFrameBodySize,
PSEthernetHeader psEthHeader
)
{
unsigned int cbMACHdLen;
unsigned int cbFrameSize;
unsigned int cbFragmentSize; //Hdr+(IV)+payload+(MIC)+(ICV)+FCS
unsigned int cbFragPayloadSize;
unsigned int cbLastFragPayloadSize;
unsigned int cbIVlen = 0;
unsigned int cbICVlen = 0;
unsigned int cbMIClen = 0;
unsigned int cbFCSlen = 4;
unsigned int uMACfragNum = 1;
bool bNeedACK;
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
if (is_multicast_ether_addr(&(psEthHeader->abyDstAddr[0])))
bNeedACK = false;
else
bNeedACK = true;
} else {
// MSDUs in Infra mode always need ACK
bNeedACK = true;
}
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
else
cbMACHdLen = WLAN_HDR_ADDR3_LEN;
if (pDevice->bEncryptionEnable == true) {
if (pTransmitKey == NULL) {
if ((pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) ||
(pDevice->pMgmt->eAuthenMode < WMAC_AUTH_WPA)) {
cbIVlen = 4;
cbICVlen = 4;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
}
} else if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
cbIVlen = 4;
cbICVlen = 4;
} else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
} else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
}
}
cbFrameSize = cbMACHdLen + cbIVlen + (cbFrameBodySize + cbMIClen) + cbICVlen + cbFCSlen;
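// Mirrors the fragment-count computation in s_cbFillTxBufHead(): integer-divide the
// body (plus MIC) by the per-fragment payload and add one fragment for any remainder.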
if ((cbFrameSize > pDevice->wFragmentationThreshold) && (bNeedACK == true)) {
// Fragmentation
cbFragmentSize = pDevice->wFragmentationThreshold;
cbFragPayloadSize = cbFragmentSize - cbMACHdLen - cbIVlen - cbICVlen - cbFCSlen;
uMACfragNum = (unsigned short) ((cbFrameBodySize + cbMIClen) / cbFragPayloadSize);
cbLastFragPayloadSize = (cbFrameBodySize + cbMIClen) % cbFragPayloadSize;
if (cbLastFragPayloadSize == 0) {
cbLastFragPayloadSize = cbFragPayloadSize;
} else {
uMACfragNum++;
}
}
return uMACfragNum;
}
void
vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, unsigned char *pbMPDU, unsigned int cbMPDULen) {
PSTxDesc pFrstTD;
unsigned char byPktType;
unsigned char *pbyTxBufferAddr;
void *pvRTS;
void *pvCTS;
void *pvTxDataHd;
unsigned int uDuration;
unsigned int cbReqCount;
PS802_11Header pMACHeader;
unsigned int cbHeaderSize;
unsigned int cbFrameBodySize;
bool bNeedACK;
bool bIsPSPOLL = false;
PSTxBufHead pTxBufHead;
unsigned int cbFrameSize;
unsigned int cbIVlen = 0;
unsigned int cbICVlen = 0;
unsigned int cbMIClen = 0;
unsigned int cbFCSlen = 4;
unsigned int uPadding = 0;
unsigned int cbMICHDR = 0;
unsigned int uLength = 0;
unsigned long dwMICKey0, dwMICKey1;
unsigned long dwMIC_Priority;
unsigned long *pdwMIC_L;
unsigned long *pdwMIC_R;
unsigned short wTxBufSize;
unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
void *pvRrvTime;
void *pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned short wCurrentRate = RATE_1M;
PUWLAN_80211HDR p80211Header;
unsigned int uNodeIndex = 0;
bool bNodeExist = false;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
unsigned char *pbyIVHead;
unsigned char *pbyPayloadHead;
unsigned char *pbyMacHdr;
unsigned int cbExtSuppRate = 0;
// PWLAN_IE pItem;
pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
if (cbMPDULen <= WLAN_HDR_ADDR3_LEN) {
cbFrameBodySize = 0;
} else {
cbFrameBodySize = cbMPDULen - WLAN_HDR_ADDR3_LEN;
}
p80211Header = (PUWLAN_80211HDR)pbMPDU;
pFrstTD = pDevice->apCurrTD[TYPE_TXDMA0];
pbyTxBufferAddr = (unsigned char *)pFrstTD->pTDInfo->buf;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
wCurrentRate = RATE_6M;
byPktType = PK_TYPE_11A;
} else {
wCurrentRate = RATE_1M;
byPktType = PK_TYPE_11B;
}
// SetPower will cause an incorrect power TX state for OFDM data packets in the TX buffer.
// 2004.11.11 Kyle -- Using OFDM power to tx MngPkt will decrease the connection capability.
// And the cmd timer will wait for data pkt TX to finish before scanning, so it's OK
// to set power here.
if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) {
RFbSetPower(pDevice, wCurrentRate, pDevice->byCurrentCh);
} else {
RFbSetPower(pDevice, wCurrentRate, pMgmt->uCurrChannel);
}
pTxBufHead->byTxPower = pDevice->byCurPwr;
//+++++++++++++++++++++ Patch VT3253 A1 performance +++++++++++++++++++++++++++
if (pDevice->byFOETuning) {
if ((p80211Header->sA3.wFrameCtl & TYPE_DATE_NULL) == TYPE_DATE_NULL) {
wCurrentRate = RATE_24M;
byPktType = PK_TYPE_11GA;
}
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vDMA0_tx_80211: p80211Header->sA3.wFrameCtl = %x \n", p80211Header->sA3.wFrameCtl);
//Set packet type
if (byPktType == PK_TYPE_11A) {//0000 0000 0000 0000
pTxBufHead->wFIFOCtl = 0;
} else if (byPktType == PK_TYPE_11B) {//0000 0001 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
} else if (byPktType == PK_TYPE_11GB) {//0000 0010 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GB;
} else if (byPktType == PK_TYPE_11GA) {//0000 0011 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_11GA;
}
pTxBufHead->wFIFOCtl |= FIFOCTL_TMOEN;
pTxBufHead->wTimeStamp = cpu_to_le16(DEFAULT_MGN_LIFETIME_RES_64us);
if (is_multicast_ether_addr(&(p80211Header->sA3.abyAddr1[0]))) {
bNeedACK = false;
if (pDevice->bEnableHostWEP) {
uNodeIndex = 0;
bNodeExist = true;
}
} else {
if (pDevice->bEnableHostWEP) {
if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, (unsigned char *)(p80211Header->sA3.abyAddr1), &uNodeIndex))
bNodeExist = true;
}
bNeedACK = true;
pTxBufHead->wFIFOCtl |= FIFOCTL_NEEDACK;
}
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) ||
(pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)) {
pTxBufHead->wFIFOCtl |= FIFOCTL_LRETRY;
}
pTxBufHead->wFIFOCtl |= (FIFOCTL_GENINT | FIFOCTL_ISDMA0);
if ((p80211Header->sA4.wFrameCtl & TYPE_SUBTYPE_MASK) == TYPE_CTL_PSPOLL) {
bIsPSPOLL = true;
cbMacHdLen = WLAN_HDR_ADDR2_LEN;
} else {
cbMacHdLen = WLAN_HDR_ADDR3_LEN;
}
// hostapd daemon ext supported-rates patch
if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0) {
cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN;
}
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0) {
cbExtSuppRate += ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN;
}
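// When rate IEs will be appended below, copy the frame body only up to the
// supported-rates offset so the (Ext)Supported Rates IEs can be replaced with the
// current ones.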
if (cbExtSuppRate > 0) {
cbFrameBodySize = WLAN_ASSOCRESP_OFF_SUPP_RATES;
}
}
//Set FRAGCTL_MACHDCNT
pTxBufHead->wFragCtl |= cpu_to_le16((unsigned short)cbMacHdLen << 10);
// Notes:
// Although the spec says an MMPDU can be fragmented, in practice
// MMPDUs are rarely fragmented; they may still be sent with RTS protection.
pDevice->bAES = false; //Set FRAGCTL_WEPTYP
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->eEncryptionStatus == Ndis802_11Encryption1Enabled) {
cbIVlen = 4;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_LEGACY;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
cbIVlen = 8;//IV+ExtIV
cbMIClen = 8;
cbICVlen = 4;
pTxBufHead->wFragCtl |= FRAGCTL_TKIP;
//We need to get seed here for filling TxKey entry.
//TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
// pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
cbMICHDR = sizeof(SMICHDRHead);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
pDevice->bAES = true;
}
//MAC Header should be padding 0 to DW alignment.
uPadding = 4 - (cbMacHdLen%4);
uPadding %= 4;
}
cbFrameSize = cbMacHdLen + cbFrameBodySize + cbIVlen + cbMIClen + cbICVlen + cbFCSlen + cbExtSuppRate;
//Set FIFOCTL_GrpAckPolicy
if (pDevice->bGrpAckPolicy == true) {//0000 0100 0000 0000
pTxBufHead->wFIFOCtl |= FIFOCTL_GRPACK;
}
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
pvRTS = NULL;
pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
} else {//802.11a/b packet
pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
pvRTS = NULL;
pvCTS = NULL;
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
}
memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
//=========================
// No Fragmentation
//=========================
pTxBufHead->wFragCtl |= (unsigned short)FRAGCTL_NONFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
0, 0, 1, AUTO_FB_NONE, wCurrentRate);
pMACHeader = (PS802_11Header) (pbyTxBufferAddr + cbHeaderSize);
cbReqCount = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen + (cbFrameBodySize + cbMIClen) + cbExtSuppRate;
pbyMacHdr = (unsigned char *)(pbyTxBufferAddr + cbHeaderSize);
pbyPayloadHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding + cbIVlen);
pbyIVHead = (unsigned char *)(pbyMacHdr + cbMacHdLen + uPadding);
// Copy the Packet into a tx Buffer
memcpy(pbyMacHdr, pbMPDU, cbMacHdLen);
// version set to 0, patch for hostapd daemon
pMACHeader->wFrameCtl &= cpu_to_le16(0xfffc);
memcpy(pbyPayloadHead, (pbMPDU + cbMacHdLen), cbFrameBodySize);
// replace supported rates, patch for hostapd daemon (only supports 11M)
if (WLAN_GET_FC_FSTYPE(p80211Header->sA4.wFrameCtl) == WLAN_FSTYPE_ASSOCRESP) {
if (cbExtSuppRate != 0) {
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len != 0)
memcpy((pbyPayloadHead + cbFrameBodySize),
pMgmt->abyCurrSuppRates,
((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN
);
if (((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len != 0)
memcpy((pbyPayloadHead + cbFrameBodySize) + ((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates)->len + WLAN_IEHDR_LEN,
pMgmt->abyCurrExtSuppRates,
((PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates)->len + WLAN_IEHDR_LEN
);
}
}
// Set wep
if (WLAN_GET_FC_ISWEP(p80211Header->sA4.wFrameCtl) != 0) {
if (pDevice->bEnableHostWEP) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
pTransmitKey->uKeyLength
);
}
if ((pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
dwMICKey0 = *(unsigned long *)(&pTransmitKey->abyKey[16]);
dwMICKey1 = *(unsigned long *)(&pTransmitKey->abyKey[20]);
// Do software Michael MIC calculation
MIC_vInit(dwMICKey0, dwMICKey1);
MIC_vAppend((unsigned char *)&(sEthHeader.abyDstAddr[0]), 12);
dwMIC_Priority = 0;
MIC_vAppend((unsigned char *)&dwMIC_Priority, 4);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
MIC_vAppend((pbyTxBufferAddr + uLength), cbFrameBodySize);
pdwMIC_L = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize);
pdwMIC_R = (unsigned long *)(pbyTxBufferAddr + uLength + cbFrameBodySize + 4);
MIC_vGetMIC(pdwMIC_L, pdwMIC_R);
MIC_vUnInit();
if (pDevice->bTxMICFail == true) {
*pdwMIC_L = 0;
*pdwMIC_R = 0;
pDevice->bTxMICFail = false;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "uLength: %d, %d\n", uLength, cbFrameBodySize);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
}
s_vFillTxKey(pDevice, (unsigned char *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
pbyMacHdr, (unsigned short)cbFrameBodySize, (unsigned char *)pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0 = pTransmitKey->wTSC15_0;
}
if (pDevice->byLocalID <= REV_ID_VT3253_A1) {
s_vSWencryption(pDevice, pTransmitKey, pbyPayloadHead, (unsigned short)(cbFrameBodySize + cbMIClen));
}
}
pMACHeader->wSeqCtl = cpu_to_le16(pDevice->wSeqCounter << 4);
pDevice->wSeqCounter++;
if (pDevice->wSeqCounter > 0x0fff)
pDevice->wSeqCounter = 0;
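// Example of the sequence-control encoding used above (illustrative): the fragment number
// occupies the low 4 bits and the sequence number bits 4-15, so wSeqCounter = 0x0001 yields
// wSeqCtl = 0x0010, and after 0x0fff the counter wraps back to 0.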
if (bIsPSPOLL) {
// The MAC will automatically replace the Duration field of the MAC header with the
// Duration field of the FIFO control header.
// This would corrupt the AID field of a PS-POLL packet (the PS-POLL AID field occupies
// the same position as the Duration field of other packets) and can cause a Cisco AP
// to issue a disassociation packet, so the original value is written into the data head below.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(p80211Header->sA2.wDurationID);
((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(p80211Header->sA2.wDurationID);
} else {
((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(p80211Header->sA2.wDurationID);
}
}
// first TD is the only TD
//Set TSR1 & ReqCount in TxDescHead
pFrstTD->pTDInfo->skb = skb;
pFrstTD->m_td1TD1.byTCR = (TCR_STP | TCR_EDP | EDMSDU);
pFrstTD->pTDInfo->skb_dma = pFrstTD->pTDInfo->buf_dma;
pFrstTD->m_td1TD1.wReqCount = cpu_to_le16(cbReqCount);
pFrstTD->buff_addr = cpu_to_le32(pFrstTD->pTDInfo->skb_dma);
pFrstTD->pTDInfo->byFlags = 0;
pFrstTD->pTDInfo->byFlags |= TD_FLAGS_PRIV_SKB;
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = false;
wmb();
pFrstTD->m_td0TD0.f1Owner = OWNED_BY_NIC;
wmb();
pDevice->iTDUsed[TYPE_TXDMA0]++;
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " available td0 <= 1\n");
}
pDevice->apCurrTD[TYPE_TXDMA0] = pFrstTD->next;
// Poll the adapter to start transmission
MACvTransmit0(pDevice->PortOffset);
return;
}
| gpl-2.0 |
qriozum/kernel | arch/mn10300/kernel/irq.c | 2569 | 9974 | /* MN10300 Arch-specific interrupt handling
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
#ifdef CONFIG_SMP
static char irq_affinity_online[NR_IRQS] = {
[0 ... NR_IRQS - 1] = 0
};
#define NR_IRQ_WORDS ((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif /* CONFIG_SMP */
atomic_t irq_err_count;
/*
* MN10300 interrupt controller operations
*/
static void mn10300_cpupic_ack(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
GxICR_u8(irq) = GxICR_DETECT;
tmp = GxICR(irq);
arch_local_irq_restore(flags);
}
static void __mask_and_set_icr(unsigned int irq,
unsigned int mask, unsigned int set)
{
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
tmp = GxICR(irq);
GxICR(irq) = (tmp & mask) | set;
tmp = GxICR(irq);
arch_local_irq_restore(flags);
}
static void mn10300_cpupic_mask(struct irq_data *d)
{
__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}
static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
unsigned int irq = d->irq;
#ifdef CONFIG_SMP
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
if (!test_and_clear_bit(irq, irq_affinity_request)) {
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
tmp = GxICR(irq);
} else {
u16 tmp2;
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL);
tmp2 = GxICR(irq);
irq_affinity_online[irq] =
cpumask_any_and(d->affinity, cpu_online_mask);
CROSS_GxICR(irq, irq_affinity_online[irq]) =
(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
}
arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}
static void mn10300_cpupic_unmask(struct irq_data *d)
{
__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}
static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
unsigned int irq = d->irq;
/* the MN10300 PIC latches its interrupt request bit, even after the
* device has ceased to assert its interrupt line and the interrupt
* channel has been disabled in the PIC, so for level-triggered
* interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
unsigned long flags;
u16 tmp;
flags = arch_local_cli_save();
if (!test_and_clear_bit(irq, irq_affinity_request)) {
tmp = GxICR(irq);
GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = GxICR(irq);
} else {
tmp = GxICR(irq);
irq_affinity_online[irq] = cpumask_any_and(d->affinity,
cpu_online_mask);
CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
}
arch_local_irq_restore(flags);
#else /* CONFIG_SMP */
__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SMP
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
bool force)
{
unsigned long flags;
int err;
flags = arch_local_cli_save();
/* check irq no */
switch (d->irq) {
case TMJCIRQ:
case RESCHEDULE_IPI:
case CALL_FUNC_SINGLE_IPI:
case LOCAL_TIMER_IPI:
case FLUSH_CACHE_IPI:
case CALL_FUNCTION_NMI_IPI:
case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
case SC0RXIRQ:
case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
case TM8IRQ:
#elif CONFIG_MN10300_TTYSM0_TIMER2
case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */
#ifdef CONFIG_MN10300_TTYSM1
case SC1RXIRQ:
case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
case TM12IRQ:
#elif CONFIG_MN10300_TTYSM1_TIMER9
case TM9IRQ:
#elif CONFIG_MN10300_TTYSM1_TIMER3
case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */
#ifdef CONFIG_MN10300_TTYSM2
case SC2RXIRQ:
case SC2TXIRQ:
case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
err = -1;
break;
default:
set_bit(d->irq, irq_affinity_request);
err = 0;
break;
}
arch_local_irq_restore(flags);
return err;
}
#endif /* CONFIG_SMP */
/*
* MN10300 PIC level-triggered IRQ handling.
*
* The PIC has no 'ACK' function per se. It is possible to clear individual
* channel latches, but each latch relatches whether or not the channel is
* masked, so we need to clear the latch when we unmask the channel.
*
* Also for this reason, we don't supply an ack() op (it's unused anyway if
* mask_ack() is provided), and mask_ack() just masks.
*/
static struct irq_chip mn10300_cpu_pic_level = {
.name = "cpu_l",
.irq_disable = mn10300_cpupic_mask,
.irq_enable = mn10300_cpupic_unmask_clear,
.irq_ack = NULL,
.irq_mask = mn10300_cpupic_mask,
.irq_mask_ack = mn10300_cpupic_mask,
.irq_unmask = mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
.irq_set_affinity = mn10300_cpupic_setaffinity,
#endif
};
/*
* MN10300 PIC edge-triggered IRQ handling.
*
* We use the latch clearing function of the PIC as the 'ACK' function.
*/
static struct irq_chip mn10300_cpu_pic_edge = {
.name = "cpu_e",
.irq_disable = mn10300_cpupic_mask,
.irq_enable = mn10300_cpupic_unmask,
.irq_ack = mn10300_cpupic_ack,
.irq_mask = mn10300_cpupic_mask,
.irq_mask_ack = mn10300_cpupic_mask_ack,
.irq_unmask = mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
.irq_set_affinity = mn10300_cpupic_setaffinity,
#endif
};
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
void ack_bad_irq(int irq)
{
printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}
/*
* change the level at which an IRQ executes
* - must not be called whilst interrupts are being processed!
*/
void set_intr_level(int irq, u16 level)
{
BUG_ON(in_interrupt());
__mask_and_set_icr(irq, GxICR_ENABLE, level);
}
/*
* mark an interrupt to be ACK'd after interrupt handlers have been run rather
* than before
* - see Documentation/mn10300/features.txt
*/
void mn10300_set_lateack_irq_type(int irq)
{
irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
handle_level_irq);
}
/*
* initialise the interrupt system
*/
void __init init_IRQ(void)
{
int irq;
for (irq = 0; irq < NR_IRQS; irq++)
if (irq_get_chip(irq) == &no_irq_chip)
/* due to the PIC latching interrupt requests, even
* when the IRQ is disabled, IRQ_PENDING is superfluous
* and we can use handle_level_irq() for edge-triggered
* interrupts */
irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
handle_level_irq);
unit_init_IRQ();
}
/*
* handle normal device IRQs
*/
asmlinkage void do_IRQ(void)
{
unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
unsigned int cpu_id = smp_processor_id();
int irq;
sp = current_stack_pointer();
BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
/* make sure local_irq_enable() doesn't muck up the interrupt priority
* setting in EPSW */
old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
local_save_flags(epsw);
__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
#ifdef CONFIG_MN10300_WD_TIMER
__IRQ_STAT(cpu_id, __irq_count)++;
#endif
irq_enter();
for (;;) {
/* ask the interrupt controller for the next IRQ to process
* - the result we get depends on EPSW.IM
*/
irq = IAGR & IAGR_GN;
if (!irq)
break;
local_irq_restore(irq_disabled_epsw);
generic_handle_irq(irq >> 2);
/* restore IRQ controls for IAGR access */
local_irq_restore(epsw);
}
__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
irq_exit();
}
/*
* Display interrupt management information through /proc/interrupts
*/
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
int j;
seq_printf(p, "%*s: ", prec, "NMI");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
#endif
seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
int irq;
unsigned int self, new;
unsigned long flags;
self = smp_processor_id();
for (irq = 0; irq < NR_IRQS; irq++) {
struct irq_data *data = irq_get_irq_data(irq);
if (irqd_is_per_cpu(data))
continue;
if (cpumask_test_cpu(self, &data->affinity) &&
!cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
int cpu_id;
cpu_id = cpumask_first(cpu_online_mask);
cpumask_set_cpu(cpu_id, &data->affinity);
}
/* We need to operate irq_affinity_online atomically. */
arch_local_cli_save(flags);
if (irq_affinity_online[irq] == self) {
u16 x, tmp;
x = GxICR(irq);
GxICR(irq) = x & GxICR_LEVEL;
tmp = GxICR(irq);
new = cpumask_any_and(&data->affinity,
cpu_online_mask);
irq_affinity_online[irq] = new;
CROSS_GxICR(irq, new) =
(x & GxICR_LEVEL) | GxICR_DETECT;
tmp = CROSS_GxICR(irq, new);
x &= GxICR_LEVEL | GxICR_ENABLE;
if (GxICR(irq) & GxICR_REQUEST)
x |= GxICR_REQUEST | GxICR_DETECT;
CROSS_GxICR(irq, new) = x;
tmp = CROSS_GxICR(irq, new);
}
arch_local_irq_restore(flags);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
| gpl-2.0 |
Hellmanor/kernel_v30c | net/netfilter/ipset/ip_set_bitmap_ipmac.c | 2569 | 16375 | /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
MODULE_ALIAS("ip_set_bitmap:ip,mac");
enum {
MAC_EMPTY, /* element is not set */
MAC_FILLED, /* element is set with MAC */
MAC_UNSET, /* element is set, without MAC */
};
/* Type structure */
struct bitmap_ipmac {
void *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 timeout; /* timeout value */
struct timer_list gc; /* garbage collector */
size_t dsize; /* size of element */
};
/* ADT structure for generic function args */
struct ipmac {
u32 id; /* id in array */
unsigned char *ether; /* ethernet address */
};
/* Member element without and with timeout */
struct ipmac_elem {
unsigned char ether[ETH_ALEN];
unsigned char match;
} __attribute__ ((aligned));
struct ipmac_telem {
unsigned char ether[ETH_ALEN];
unsigned char match;
unsigned long timeout;
} __attribute__ ((aligned));
static inline void *
bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
{
return (void *)((char *)map->members + id * map->dsize);
}
static inline bool
bitmap_timeout(const struct bitmap_ipmac *map, u32 id)
{
const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
return ip_set_timeout_test(elem->timeout);
}
static inline bool
bitmap_expired(const struct bitmap_ipmac *map, u32 id)
{
const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
return ip_set_timeout_expired(elem->timeout);
}
static inline int
bitmap_ipmac_exist(const struct ipmac_telem *elem)
{
return elem->match == MAC_UNSET ||
(elem->match == MAC_FILLED &&
!ip_set_timeout_expired(elem->timeout));
}
/* Base variant */
static int
bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
{
const struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
switch (elem->match) {
case MAC_UNSET:
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
case MAC_FILLED:
return data->ether == NULL ||
compare_ether_addr(data->ether, elem->ether) == 0;
}
return 0;
}
static int
bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
{
struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
switch (elem->match) {
case MAC_UNSET:
if (!data->ether)
/* Already added without ethernet address */
return -IPSET_ERR_EXIST;
/* Fill the MAC address */
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
break;
case MAC_FILLED:
return -IPSET_ERR_EXIST;
case MAC_EMPTY:
if (data->ether) {
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
} else
elem->match = MAC_UNSET;
}
return 0;
}
static int
bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
{
struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
if (elem->match == MAC_EMPTY)
return -IPSET_ERR_EXIST;
elem->match = MAC_EMPTY;
return 0;
}
static int
bitmap_ipmac_list(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
{
const struct bitmap_ipmac *map = set->data;
const struct ipmac_elem *elem;
struct nlattr *atd, *nested;
u32 id, first = cb->args[2];
u32 last = map->last_ip - map->first_ip;
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
elem = bitmap_ipmac_elem(map, id);
if (elem->match == MAC_EMPTY)
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id));
if (elem->match == MAC_FILLED)
NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
elem->ether);
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure:
nla_nest_cancel(skb, nested);
ipset_nest_end(skb, atd);
if (unlikely(id == first)) {
cb->args[2] = 0;
return -EMSGSIZE;
}
return 0;
}
/* Timeout variant */
static int
bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
{
const struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
switch (elem->match) {
case MAC_UNSET:
/* Trigger kernel to fill out the ethernet address */
return -EAGAIN;
case MAC_FILLED:
return (data->ether == NULL ||
compare_ether_addr(data->ether, elem->ether) == 0) &&
!bitmap_expired(map, data->id);
}
return 0;
}
static int
bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
{
struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
switch (elem->match) {
case MAC_UNSET:
if (!data->ether)
/* Already added without ethernet address */
return -IPSET_ERR_EXIST;
/* Fill the MAC address and activate the timer */
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
if (timeout == map->timeout)
/* Timeout was not specified, get stored one */
timeout = elem->timeout;
elem->timeout = ip_set_timeout_set(timeout);
break;
case MAC_FILLED:
if (!bitmap_expired(map, data->id))
return -IPSET_ERR_EXIST;
/* Fall through */
case MAC_EMPTY:
if (data->ether) {
memcpy(elem->ether, data->ether, ETH_ALEN);
elem->match = MAC_FILLED;
} else
elem->match = MAC_UNSET;
/* If MAC is unset yet, we store plain timeout value
* because the timer is not activated yet
* and we can reuse it later when MAC is filled out,
* possibly by the kernel */
elem->timeout = data->ether ? ip_set_timeout_set(timeout)
: timeout;
break;
}
return 0;
}
static int
bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
{
struct bitmap_ipmac *map = set->data;
const struct ipmac *data = value;
struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id))
return -IPSET_ERR_EXIST;
elem->match = MAC_EMPTY;
return 0;
}
static int
bitmap_ipmac_tlist(const struct ip_set *set,
struct sk_buff *skb, struct netlink_callback *cb)
{
const struct bitmap_ipmac *map = set->data;
const struct ipmac_telem *elem;
struct nlattr *atd, *nested;
u32 id, first = cb->args[2];
u32 timeout, last = map->last_ip - map->first_ip;
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
if (!atd)
return -EMSGSIZE;
for (; cb->args[2] <= last; cb->args[2]++) {
id = cb->args[2];
elem = bitmap_ipmac_elem(map, id);
if (!bitmap_ipmac_exist(elem))
continue;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested) {
if (id == first) {
nla_nest_cancel(skb, atd);
return -EMSGSIZE;
} else
goto nla_put_failure;
}
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
htonl(map->first_ip + id));
if (elem->match == MAC_FILLED)
NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
elem->ether);
timeout = elem->match == MAC_UNSET ? elem->timeout
: ip_set_timeout_get(elem->timeout);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
ipset_nest_end(skb, nested);
}
ipset_nest_end(skb, atd);
/* Set listing finished */
cb->args[2] = 0;
return 0;
nla_put_failure:
nla_nest_cancel(skb, nested);
ipset_nest_end(skb, atd);
return -EMSGSIZE;
}
static int
bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
{
struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data;
/* MAC can be src only */
if (!(flags & IPSET_DIM_TWO_SRC))
return 0;
data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
if (data.id < map->first_ip || data.id > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
/* Backward compatibility: we don't check the second flag */
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
data.id -= map->first_ip;
data.ether = eth_hdr(skb)->h_source;
return adtfn(set, &data, map->timeout);
}
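/*
 * Illustrative example of the id computation above (values chosen for explanation only):
 * for a set created over 192.168.0.0-192.168.0.255, first_ip = 0xC0A80000 and
 * last_ip = 0xC0A800FF; a packet from 192.168.0.5 gives data.id = 0xC0A80005, which is
 * in range, and after "data.id -= map->first_ip" becomes index 5 into the members array
 * (i.e. the element at byte offset 5 * map->dsize).
 */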
static int
bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
enum ipset_adt adt, u32 *lineno, u32 flags)
{
const struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data;
u32 timeout = map->timeout;
int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id);
if (ret)
return ret;
if (data.id < map->first_ip || data.id > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
if (tb[IPSET_ATTR_ETHER])
data.ether = nla_data(tb[IPSET_ATTR_ETHER]);
else
data.ether = NULL;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!with_timeout(map->timeout))
return -IPSET_ERR_TIMEOUT;
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
data.id -= map->first_ip;
ret = adtfn(set, &data, timeout);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
static void
bitmap_ipmac_destroy(struct ip_set *set)
{
struct bitmap_ipmac *map = set->data;
if (with_timeout(map->timeout))
del_timer_sync(&map->gc);
ip_set_free(map->members);
kfree(map);
set->data = NULL;
}
static void
bitmap_ipmac_flush(struct ip_set *set)
{
struct bitmap_ipmac *map = set->data;
memset(map->members, 0,
(map->last_ip - map->first_ip + 1) * map->dsize);
}
static int
bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
{
const struct bitmap_ipmac *map = set->data;
struct nlattr *nested;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map)
+ (map->last_ip - map->first_ip + 1) * map->dsize));
if (with_timeout(map->timeout))
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
ipset_nest_end(skb, nested);
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static bool
bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
{
const struct bitmap_ipmac *x = a->data;
const struct bitmap_ipmac *y = b->data;
return x->first_ip == y->first_ip &&
x->last_ip == y->last_ip &&
x->timeout == y->timeout;
}
static const struct ip_set_type_variant bitmap_ipmac = {
.kadt = bitmap_ipmac_kadt,
.uadt = bitmap_ipmac_uadt,
.adt = {
[IPSET_ADD] = bitmap_ipmac_add,
[IPSET_DEL] = bitmap_ipmac_del,
[IPSET_TEST] = bitmap_ipmac_test,
},
.destroy = bitmap_ipmac_destroy,
.flush = bitmap_ipmac_flush,
.head = bitmap_ipmac_head,
.list = bitmap_ipmac_list,
.same_set = bitmap_ipmac_same_set,
};
static const struct ip_set_type_variant bitmap_tipmac = {
.kadt = bitmap_ipmac_kadt,
.uadt = bitmap_ipmac_uadt,
.adt = {
[IPSET_ADD] = bitmap_ipmac_tadd,
[IPSET_DEL] = bitmap_ipmac_tdel,
[IPSET_TEST] = bitmap_ipmac_ttest,
},
.destroy = bitmap_ipmac_destroy,
.flush = bitmap_ipmac_flush,
.head = bitmap_ipmac_head,
.list = bitmap_ipmac_tlist,
.same_set = bitmap_ipmac_same_set,
};
static void
bitmap_ipmac_gc(unsigned long ul_set)
{
struct ip_set *set = (struct ip_set *) ul_set;
struct bitmap_ipmac *map = set->data;
struct ipmac_telem *elem;
u32 id, last = map->last_ip - map->first_ip;
/* We run parallel with other readers (test element)
* but adding/deleting new entries is locked out */
read_lock_bh(&set->lock);
for (id = 0; id <= last; id++) {
elem = bitmap_ipmac_elem(map, id);
if (elem->match == MAC_FILLED &&
ip_set_timeout_expired(elem->timeout))
elem->match = MAC_EMPTY;
}
read_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
}
static void
bitmap_ipmac_gc_init(struct ip_set *set)
{
struct bitmap_ipmac *map = set->data;
init_timer(&map->gc);
map->gc.data = (unsigned long) set;
map->gc.function = bitmap_ipmac_gc;
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
}
/* Create bitmap:ip,mac type of sets */
static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip)
{
map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
if (!map->members)
return false;
map->first_ip = first_ip;
map->last_ip = last_ip;
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
set->family = AF_INET;
return true;
}
static int
bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
u32 flags)
{
u32 first_ip, last_ip, elements;
struct bitmap_ipmac *map;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
return -IPSET_ERR_PROTOCOL;
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
if (ret)
return ret;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
if (ret)
return ret;
if (first_ip > last_ip) {
u32 tmp = first_ip;
first_ip = last_ip;
last_ip = tmp;
}
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (cidr >= 32)
return -IPSET_ERR_INVALID_CIDR;
last_ip = first_ip | ~ip_set_hostmask(cidr);
} else
return -IPSET_ERR_PROTOCOL;
elements = last_ip - first_ip + 1;
if (elements > IPSET_BITMAP_MAX_RANGE + 1)
return -IPSET_ERR_BITMAP_RANGE_SIZE;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
if (tb[IPSET_ATTR_TIMEOUT]) {
map->dsize = sizeof(struct ipmac_telem);
if (!init_map_ipmac(set, map, first_ip, last_ip)) {
kfree(map);
return -ENOMEM;
}
map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
set->variant = &bitmap_tipmac;
bitmap_ipmac_gc_init(set);
} else {
map->dsize = sizeof(struct ipmac_elem);
if (!init_map_ipmac(set, map, first_ip, last_ip)) {
kfree(map);
return -ENOMEM;
}
set->variant = &bitmap_ipmac;
}
return 0;
}
static struct ip_set_type bitmap_ipmac_type = {
.name = "bitmap:ip,mac",
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
.dimension = IPSET_DIM_TWO,
.family = AF_INET,
.revision = 0,
.create = bitmap_ipmac_create,
.create_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
},
.adt_policy = {
[IPSET_ATTR_IP] = { .type = NLA_NESTED },
[IPSET_ATTR_ETHER] = { .type = NLA_BINARY, .len = ETH_ALEN },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
},
.me = THIS_MODULE,
};
static int __init
bitmap_ipmac_init(void)
{
return ip_set_type_register(&bitmap_ipmac_type);
}
static void __exit
bitmap_ipmac_fini(void)
{
ip_set_type_unregister(&bitmap_ipmac_type);
}
module_init(bitmap_ipmac_init);
module_exit(bitmap_ipmac_fini);
| gpl-2.0 |
davem330/sparc | arch/arm/mach-spear6xx/spear600_evb.c | 2825 | 1230 | /*
* arch/arm/mach-spear6xx/spear600_evb.c
*
* SPEAr600 evaluation board source file
*
* Copyright (C) 2009 ST Microelectronics
* Viresh Kumar<viresh.kumar@st.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <mach/generic.h>
#include <mach/hardware.h>
static struct amba_device *amba_devs[] __initdata = {
&gpio_device[0],
&gpio_device[1],
&gpio_device[2],
&uart_device[0],
&uart_device[1],
};
static struct platform_device *plat_devs[] __initdata = {
};
static void __init spear600_evb_init(void)
{
unsigned int i;
/* call spear600 machine init function */
spear600_init();
/* Add Platform Devices */
platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
/* Add Amba Devices */
for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
amba_device_register(amba_devs[i], &iomem_resource);
}
MACHINE_START(SPEAR600, "ST-SPEAR600-EVB")
.boot_params = 0x00000100,
.map_io = spear6xx_map_io,
.init_irq = spear6xx_init_irq,
.timer = &spear6xx_timer,
.init_machine = spear600_evb_init,
MACHINE_END
| gpl-2.0 |
venkatkamesh/android_kernel_sonyz_msm8974 | fs/ext4/move_extent.c | 3081 | 41825 | /*
* Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
* Written by Takashi Sato <t-sato@yk.jp.nec.com>
* Akira Fujita <a-fujita@rs.jp.nec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4.h"
/**
* get_ext_path - Find an extent path for designated logical block number.
*
* @inode: an inode which is searched
* @lblock: logical block number to find an extent path
* @path: pointer to an extent path pointer (for output)
*
* ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value
* on failure.
*/
static inline int
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
struct ext4_ext_path **path)
{
int ret = 0;
*path = ext4_ext_find_extent(inode, lblock, *path);
if (IS_ERR(*path)) {
ret = PTR_ERR(*path);
*path = NULL;
} else if ((*path)[ext_depth(inode)].p_ext == NULL)
ret = -ENODATA;
return ret;
}
/**
* copy_extent_status - Copy the extent's initialization status
*
* @src: an extent for getting initialize status
* @dest: an extent to be set the status
*/
static void
copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
{
if (ext4_ext_is_uninitialized(src))
ext4_ext_mark_uninitialized(dest);
else
dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
}
/**
* mext_next_extent - Search for the next extent and set it to "extent"
*
* @inode: inode which is searched
* @path: this will obtain data for the next extent
* @extent: pointer to the next extent we have just gotten
*
* Search the next extent in the array of ext4_ext_path structure (@path)
* and set it to ext4_extent structure (@extent). In addition, the member of
* @path (->p_ext) also points the next extent. Return 0 on success, 1 if
* ext4_ext_path structure refers to the last extent, or a negative error
* value on failure.
*/
static int
mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
struct ext4_extent **extent)
{
struct ext4_extent_header *eh;
int ppos, leaf_ppos = path->p_depth;
ppos = leaf_ppos;
if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
/* leaf block */
*extent = ++path[ppos].p_ext;
path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
return 0;
}
while (--ppos >= 0) {
if (EXT_LAST_INDEX(path[ppos].p_hdr) >
path[ppos].p_idx) {
int cur_ppos = ppos;
/* index block */
path[ppos].p_idx++;
path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
if (path[ppos+1].p_bh)
brelse(path[ppos+1].p_bh);
path[ppos+1].p_bh =
sb_bread(inode->i_sb, path[ppos].p_block);
if (!path[ppos+1].p_bh)
return -EIO;
path[ppos+1].p_hdr =
ext_block_hdr(path[ppos+1].p_bh);
/* Halfway index block */
while (++cur_ppos < leaf_ppos) {
path[cur_ppos].p_idx =
EXT_FIRST_INDEX(path[cur_ppos].p_hdr);
path[cur_ppos].p_block =
ext4_idx_pblock(path[cur_ppos].p_idx);
if (path[cur_ppos+1].p_bh)
brelse(path[cur_ppos+1].p_bh);
path[cur_ppos+1].p_bh = sb_bread(inode->i_sb,
path[cur_ppos].p_block);
if (!path[cur_ppos+1].p_bh)
return -EIO;
path[cur_ppos+1].p_hdr =
ext_block_hdr(path[cur_ppos+1].p_bh);
}
path[leaf_ppos].p_ext = *extent = NULL;
eh = path[leaf_ppos].p_hdr;
if (le16_to_cpu(eh->eh_entries) == 0)
/* empty leaf is found */
return -ENODATA;
/* leaf block */
path[leaf_ppos].p_ext = *extent =
EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
path[leaf_ppos].p_block =
ext4_ext_pblock(path[leaf_ppos].p_ext);
return 0;
}
}
/* We found the last extent */
return 1;
}
/**
* mext_check_null_inode - NULL check for two inodes
*
* If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
*/
static int
mext_check_null_inode(struct inode *inode1, struct inode *inode2,
const char *function, unsigned int line)
{
int ret = 0;
if (inode1 == NULL) {
__ext4_error(inode2->i_sb, function, line,
"Both inodes should not be NULL: "
"inode1 NULL inode2 %lu", inode2->i_ino);
ret = -EIO;
} else if (inode2 == NULL) {
__ext4_error(inode1->i_sb, function, line,
"Both inodes should not be NULL: "
"inode1 %lu inode2 NULL", inode1->i_ino);
ret = -EIO;
}
return ret;
}
/**
* double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
*
* @orig_inode: original inode structure
* @donor_inode: donor inode structure
* Acquire write lock of i_data_sem of the two inodes (orig and donor) by
* i_ino order.
*/
static void
double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
{
struct inode *first = orig_inode, *second = donor_inode;
/*
* Use the inode number to provide the stable locking order instead
* of its address, because the C language doesn't guarantee you can
* compare pointers that don't come from the same array.
*/
if (donor_inode->i_ino < orig_inode->i_ino) {
first = donor_inode;
second = orig_inode;
}
down_write(&EXT4_I(first)->i_data_sem);
down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
}
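/*
 * Worked example of the ordering above (explanatory only): with orig->i_ino == 12 and
 * donor->i_ino == 7, every caller takes inode 7's i_data_sem before inode 12's, so two
 * concurrent move-extent requests on the same pair of inodes always acquire the two
 * semaphores in the same global order and an ABBA deadlock cannot occur.
 */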
/**
* double_up_write_data_sem - Release two inodes' write lock of i_data_sem
*
* @orig_inode: original inode structure to be released its lock first
* @donor_inode: donor inode structure to be released its lock second
* Release write lock of i_data_sem of two inodes (orig and donor).
*/
static void
double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
{
up_write(&EXT4_I(orig_inode)->i_data_sem);
up_write(&EXT4_I(donor_inode)->i_data_sem);
}
/**
* mext_insert_across_blocks - Insert extents across leaf block
*
* @handle: journal handle
* @orig_inode: original inode
* @o_start: first original extent to be changed
* @o_end: last original extent to be changed
* @start_ext: first new extent to be inserted
* @new_ext: middle of new extent to be inserted
* @end_ext: last new extent to be inserted
*
* Allocate a new leaf block and insert extents into it. Return 0 on success,
* or a negative error value on failure.
*/
static int
mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
struct ext4_extent *o_start, struct ext4_extent *o_end,
struct ext4_extent *start_ext, struct ext4_extent *new_ext,
struct ext4_extent *end_ext)
{
struct ext4_ext_path *orig_path = NULL;
ext4_lblk_t eblock = 0;
int new_flag = 0;
int end_flag = 0;
int err = 0;
if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) {
if (o_start == o_end) {
/* start_ext new_ext end_ext
* donor |---------|-----------|--------|
* orig |------------------------------|
*/
end_flag = 1;
} else {
/* start_ext new_ext end_ext
* donor |---------|----------|---------|
* orig |---------------|--------------|
*/
o_end->ee_block = end_ext->ee_block;
o_end->ee_len = end_ext->ee_len;
ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
}
o_start->ee_len = start_ext->ee_len;
eblock = le32_to_cpu(start_ext->ee_block);
new_flag = 1;
} else if (start_ext->ee_len && new_ext->ee_len &&
!end_ext->ee_len && o_start == o_end) {
/* start_ext new_ext
* donor |--------------|---------------|
* orig |------------------------------|
*/
o_start->ee_len = start_ext->ee_len;
eblock = le32_to_cpu(start_ext->ee_block);
new_flag = 1;
} else if (!start_ext->ee_len && new_ext->ee_len &&
end_ext->ee_len && o_start == o_end) {
/* new_ext end_ext
* donor |--------------|---------------|
* orig |------------------------------|
*/
o_end->ee_block = end_ext->ee_block;
o_end->ee_len = end_ext->ee_len;
ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
/*
* Set 0 to the extent block if new_ext was
* the first block.
*/
if (new_ext->ee_block)
eblock = le32_to_cpu(new_ext->ee_block);
new_flag = 1;
} else {
ext4_debug("ext4 move extent: Unexpected insert case\n");
return -EIO;
}
if (new_flag) {
err = get_ext_path(orig_inode, eblock, &orig_path);
if (err)
goto out;
if (ext4_ext_insert_extent(handle, orig_inode,
orig_path, new_ext, 0))
goto out;
}
if (end_flag) {
err = get_ext_path(orig_inode,
le32_to_cpu(end_ext->ee_block) - 1, &orig_path);
if (err)
goto out;
if (ext4_ext_insert_extent(handle, orig_inode,
orig_path, end_ext, 0))
goto out;
}
out:
if (orig_path) {
ext4_ext_drop_refs(orig_path);
kfree(orig_path);
}
return err;
}
/**
* mext_insert_inside_block - Insert new extent to the extent block
*
* @o_start: first original extent to be moved
* @o_end: last original extent to be moved
* @start_ext: first new extent to be inserted
* @new_ext: middle of new extent to be inserted
* @end_ext: last new extent to be inserted
* @eh: extent header of target leaf block
* @range_to_move: used to decide how to insert extent
*
* Insert extents into the leaf block. The extent (@o_start) is overwritten
* by inserted extents.
*/
static void
mext_insert_inside_block(struct ext4_extent *o_start,
struct ext4_extent *o_end,
struct ext4_extent *start_ext,
struct ext4_extent *new_ext,
struct ext4_extent *end_ext,
struct ext4_extent_header *eh,
int range_to_move)
{
int i = 0;
unsigned long len;
/* Move the existing extents */
if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) {
len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) -
(unsigned long)(o_end + 1);
memmove(o_end + 1 + range_to_move, o_end + 1, len);
}
/* Insert start entry */
if (start_ext->ee_len)
o_start[i++].ee_len = start_ext->ee_len;
/* Insert new entry */
if (new_ext->ee_len) {
o_start[i] = *new_ext;
ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext));
}
/* Insert end entry */
if (end_ext->ee_len)
o_start[i] = *end_ext;
/* Increment the total entries counter on the extent block */
le16_add_cpu(&eh->eh_entries, range_to_move);
}
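/*
 * Illustrative example of range_to_move (values chosen for explanation only): if the
 * original extents o_start..o_end span three slots but only start_ext and new_ext have a
 * nonzero length, need_slots = 2 and slots_range = 3, so range_to_move = -1; the trailing
 * extents are shifted left by one slot by the memmove above and le16_add_cpu() decreases
 * eh_entries by one. A positive range_to_move shifts them right and grows eh_entries.
 */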
/**
* mext_insert_extents - Insert new extent
*
* @handle: journal handle
* @orig_inode: original inode
* @orig_path: path indicates first extent to be changed
* @o_start: first original extent to be changed
* @o_end: last original extent to be changed
* @start_ext: first new extent to be inserted
* @new_ext: middle of new extent to be inserted
* @end_ext: last new extent to be inserted
*
* Call the function to insert extents. If we cannot add more extents into
* the leaf block, we call mext_insert_across_blocks() to create a
* new leaf block. Otherwise call mext_insert_inside_block(). Return 0
* on success, or a negative error value on failure.
*/
static int
mext_insert_extents(handle_t *handle, struct inode *orig_inode,
struct ext4_ext_path *orig_path,
struct ext4_extent *o_start,
struct ext4_extent *o_end,
struct ext4_extent *start_ext,
struct ext4_extent *new_ext,
struct ext4_extent *end_ext)
{
struct ext4_extent_header *eh;
unsigned long need_slots, slots_range;
int range_to_move, depth, ret;
/*
* The extents need to be inserted
* start_extent + new_extent + end_extent.
*/
need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) +
(new_ext->ee_len ? 1 : 0);
/* The number of slots between start and end */
slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1)
/ sizeof(struct ext4_extent);
/* Range to move the end of extent */
range_to_move = need_slots - slots_range;
depth = orig_path->p_depth;
orig_path += depth;
eh = orig_path->p_hdr;
if (depth) {
/* Register to journal */
ret = ext4_journal_get_write_access(handle, orig_path->p_bh);
if (ret)
return ret;
}
/* Expansion */
if (range_to_move > 0 &&
(range_to_move > le16_to_cpu(eh->eh_max)
- le16_to_cpu(eh->eh_entries))) {
ret = mext_insert_across_blocks(handle, orig_inode, o_start,
o_end, start_ext, new_ext, end_ext);
if (ret < 0)
return ret;
} else
mext_insert_inside_block(o_start, o_end, start_ext, new_ext,
end_ext, eh, range_to_move);
if (depth) {
ret = ext4_handle_dirty_metadata(handle, orig_inode,
orig_path->p_bh);
if (ret)
return ret;
} else {
ret = ext4_mark_inode_dirty(handle, orig_inode);
if (ret < 0)
return ret;
}
return 0;
}
/**
* mext_leaf_block - Move one leaf extent block into the inode.
*
* @handle: journal handle
* @orig_inode: original inode
* @orig_path: path indicates first extent to be changed
* @dext: donor extent
* @from: start offset on the target file
*
* In order to insert extents into the leaf block, we must divide the extent
* in the leaf block into three extents. The one is located to be inserted
* extents, and the others are located around it.
*
* Therefore, this function creates structures to save extents of the leaf
* block, and inserts extents by calling mext_insert_extents() with
* created extents. Return 0 on success, or a negative error value on failure.
*/
static int
mext_leaf_block(handle_t *handle, struct inode *orig_inode,
struct ext4_ext_path *orig_path, struct ext4_extent *dext,
ext4_lblk_t *from)
{
struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
struct ext4_extent new_ext, start_ext, end_ext;
ext4_lblk_t new_ext_end;
int oext_alen, new_ext_alen, end_ext_alen;
int depth = ext_depth(orig_inode);
int ret;
start_ext.ee_block = end_ext.ee_block = 0;
o_start = o_end = oext = orig_path[depth].p_ext;
oext_alen = ext4_ext_get_actual_len(oext);
start_ext.ee_len = end_ext.ee_len = 0;
new_ext.ee_block = cpu_to_le32(*from);
ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext));
new_ext.ee_len = dext->ee_len;
new_ext_alen = ext4_ext_get_actual_len(&new_ext);
new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
/*
* Case: original extent is first
* oext |--------|
* new_ext |--|
* start_ext |--|
*/
if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) &&
le32_to_cpu(new_ext.ee_block) <
le32_to_cpu(oext->ee_block) + oext_alen) {
start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
le32_to_cpu(oext->ee_block));
start_ext.ee_block = oext->ee_block;
copy_extent_status(oext, &start_ext);
} else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
prev_ext = oext - 1;
/*
* We can merge new_ext into previous extent,
* if these are contiguous and same extent type.
*/
if (ext4_can_extents_be_merged(orig_inode, prev_ext,
&new_ext)) {
o_start = prev_ext;
start_ext.ee_len = cpu_to_le16(
ext4_ext_get_actual_len(prev_ext) +
new_ext_alen);
start_ext.ee_block = oext->ee_block;
copy_extent_status(prev_ext, &start_ext);
new_ext.ee_len = 0;
}
}
/*
* Case: new_ext_end must be less than oext
* oext |-----------|
* new_ext |-------|
*/
if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
EXT4_ERROR_INODE(orig_inode,
"new_ext_end(%u) should be less than or equal to "
"oext->ee_block(%u) + oext_alen(%d) - 1",
new_ext_end, le32_to_cpu(oext->ee_block),
oext_alen);
ret = -EIO;
goto out;
}
/*
* Case: new_ext is smaller than original extent
* oext |---------------|
* new_ext |-----------|
* end_ext |---|
*/
if (le32_to_cpu(oext->ee_block) <= new_ext_end &&
new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) {
end_ext.ee_len =
cpu_to_le16(le32_to_cpu(oext->ee_block) +
oext_alen - 1 - new_ext_end);
copy_extent_status(oext, &end_ext);
end_ext_alen = ext4_ext_get_actual_len(&end_ext);
ext4_ext_store_pblock(&end_ext,
(ext4_ext_pblock(o_end) + oext_alen - end_ext_alen));
end_ext.ee_block =
cpu_to_le32(le32_to_cpu(o_end->ee_block) +
oext_alen - end_ext_alen);
}
ret = mext_insert_extents(handle, orig_inode, orig_path, o_start,
o_end, &start_ext, &new_ext, &end_ext);
out:
return ret;
}
/**
* mext_calc_swap_extents - Calculate extents for extent swapping.
*
* @tmp_dext: the extent that will belong to the original inode
* @tmp_oext: the extent that will belong to the donor inode
* @orig_off: block offset of original inode
* @donor_off: block offset of donor inode
* @max_count: the maximum length of extents
*
* Return 0 on success, or a negative error value on failure.
*/
static int
mext_calc_swap_extents(struct ext4_extent *tmp_dext,
struct ext4_extent *tmp_oext,
ext4_lblk_t orig_off, ext4_lblk_t donor_off,
ext4_lblk_t max_count)
{
ext4_lblk_t diff, orig_diff;
struct ext4_extent dext_old, oext_old;
BUG_ON(orig_off != donor_off);
/* original and donor extents have to cover the same block offset */
if (orig_off < le32_to_cpu(tmp_oext->ee_block) ||
le32_to_cpu(tmp_oext->ee_block) +
ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off)
return -ENODATA;
if (orig_off < le32_to_cpu(tmp_dext->ee_block) ||
le32_to_cpu(tmp_dext->ee_block) +
ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off)
return -ENODATA;
dext_old = *tmp_dext;
oext_old = *tmp_oext;
/* When tmp_dext is too large, pick up the target range. */
diff = donor_off - le32_to_cpu(tmp_dext->ee_block);
ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
tmp_dext->ee_block =
cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff);
tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff);
if (max_count < ext4_ext_get_actual_len(tmp_dext))
tmp_dext->ee_len = cpu_to_le16(max_count);
orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff);
/* Adjust extent length if donor extent is larger than orig */
if (ext4_ext_get_actual_len(tmp_dext) >
ext4_ext_get_actual_len(tmp_oext) - orig_diff)
tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) -
orig_diff);
tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext));
copy_extent_status(&oext_old, tmp_dext);
copy_extent_status(&dext_old, tmp_oext);
return 0;
}
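/*
 * A worked example of the arithmetic above (illustrative values only): with
 * tmp_dext = {ee_block 100, len 50}, tmp_oext = {ee_block 105, len 30},
 * orig_off = donor_off = 110 and max_count = 16:
 *   diff      = 110 - 100 = 10 -> donor extent trimmed to {110, 40}, then clamped
 *                                 to max_count, giving len 16;
 *   orig_diff = 110 - 105 = 5  -> 30 - 5 = 25 blocks remain in the original extent,
 *                                 which is >= 16, so no further clamping;
 *   finally tmp_oext->ee_len is set to 16, so both extents describe the same
 *   16-block range to be swapped.
 */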
/**
* mext_replace_branches - Replace original extents with new extents
*
* @handle: journal handle
* @orig_inode: original inode
* @donor_inode: donor inode
* @from: block offset of orig_inode
* @count: block count to be replaced
* @err: pointer to save return value
*
* Replace original inode extents and donor inode extents page by page.
* We implement this replacement in the following three steps:
* 1. Save the block information of original and donor inodes into
* dummy extents.
* 2. Change the block information of original inode to point at the
* donor inode blocks.
* 3. Change the block information of donor inode to point at the saved
* original inode blocks in the dummy extents.
*
* Return replaced block count.
*/
static int
mext_replace_branches(handle_t *handle, struct inode *orig_inode,
struct inode *donor_inode, ext4_lblk_t from,
ext4_lblk_t count, int *err)
{
struct ext4_ext_path *orig_path = NULL;
struct ext4_ext_path *donor_path = NULL;
struct ext4_extent *oext, *dext;
struct ext4_extent tmp_dext, tmp_oext;
ext4_lblk_t orig_off = from, donor_off = from;
int depth;
int replaced_count = 0;
int dext_alen;
/* Protect extent trees against block allocations via delalloc */
double_down_write_data_sem(orig_inode, donor_inode);
/* Get the original extent for the block "orig_off" */
*err = get_ext_path(orig_inode, orig_off, &orig_path);
if (*err)
goto out;
/* Get the donor extent for the head */
*err = get_ext_path(donor_inode, donor_off, &donor_path);
if (*err)
goto out;
depth = ext_depth(orig_inode);
oext = orig_path[depth].p_ext;
tmp_oext = *oext;
depth = ext_depth(donor_inode);
dext = donor_path[depth].p_ext;
tmp_dext = *dext;
*err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
donor_off, count);
if (*err)
goto out;
/* Loop for the donor extents */
while (1) {
/* The extent for donor must be found. */
if (!dext) {
EXT4_ERROR_INODE(donor_inode,
"The extent for donor must be found");
*err = -EIO;
goto out;
} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
EXT4_ERROR_INODE(donor_inode,
"Donor offset(%u) and the first block of donor "
"extent(%u) should be equal",
donor_off,
le32_to_cpu(tmp_dext.ee_block));
*err = -EIO;
goto out;
}
/* Set donor extent to orig extent */
*err = mext_leaf_block(handle, orig_inode,
orig_path, &tmp_dext, &orig_off);
if (*err)
goto out;
/* Set orig extent to donor extent */
*err = mext_leaf_block(handle, donor_inode,
donor_path, &tmp_oext, &donor_off);
if (*err)
goto out;
dext_alen = ext4_ext_get_actual_len(&tmp_dext);
replaced_count += dext_alen;
donor_off += dext_alen;
orig_off += dext_alen;
/* Already moved the expected blocks */
if (replaced_count >= count)
break;
if (orig_path)
ext4_ext_drop_refs(orig_path);
*err = get_ext_path(orig_inode, orig_off, &orig_path);
if (*err)
goto out;
depth = ext_depth(orig_inode);
oext = orig_path[depth].p_ext;
tmp_oext = *oext;
if (donor_path)
ext4_ext_drop_refs(donor_path);
*err = get_ext_path(donor_inode, donor_off, &donor_path);
if (*err)
goto out;
depth = ext_depth(donor_inode);
dext = donor_path[depth].p_ext;
tmp_dext = *dext;
*err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
donor_off, count - replaced_count);
if (*err)
goto out;
}
out:
if (orig_path) {
ext4_ext_drop_refs(orig_path);
kfree(orig_path);
}
if (donor_path) {
ext4_ext_drop_refs(donor_path);
kfree(donor_path);
}
ext4_ext_invalidate_cache(orig_inode);
ext4_ext_invalidate_cache(donor_inode);
double_up_write_data_sem(orig_inode, donor_inode);
return replaced_count;
}
/**
* move_extent_per_page - Move extent data per page
*
* @o_filp: file structure of original file
* @donor_inode: donor inode
* @orig_page_offset: page index on original file
* @data_offset_in_page: block index where data swapping starts
* @block_len_in_page: the number of blocks to be swapped
* @uninit: orig extent is uninitialized or not
* @err: pointer to save return value
*
* Save the data in original inode blocks and replace original inode extents
* with donor inode extents by calling mext_replace_branches().
* Finally, write out the saved data in new original inode blocks. Return
* replaced block count.
*/
static int
move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
pgoff_t orig_page_offset, int data_offset_in_page,
int block_len_in_page, int uninit, int *err)
{
struct inode *orig_inode = o_filp->f_dentry->d_inode;
struct address_space *mapping = orig_inode->i_mapping;
struct buffer_head *bh;
struct page *page = NULL;
const struct address_space_operations *a_ops = mapping->a_ops;
handle_t *handle;
ext4_lblk_t orig_blk_offset;
long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
unsigned int w_flags = 0;
unsigned int tmp_data_size, data_size, replaced_size;
void *fsdata;
int i, jblocks;
int err2 = 0;
int replaced_count = 0;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
/*
* It needs twice the amount of ordinary journal buffers because
* inode and donor_inode may change each different metadata blocks.
*/
jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
handle = ext4_journal_start(orig_inode, jblocks);
if (IS_ERR(handle)) {
*err = PTR_ERR(handle);
return 0;
}
if (segment_eq(get_fs(), KERNEL_DS))
w_flags |= AOP_FLAG_UNINTERRUPTIBLE;
orig_blk_offset = orig_page_offset * blocks_per_page +
data_offset_in_page;
/*
* If orig extent is uninitialized one,
* it's not necessary force the page into memory
* and then force it to be written out again.
* Just swap data blocks between orig and donor.
*/
if (uninit) {
replaced_count = mext_replace_branches(handle, orig_inode,
donor_inode, orig_blk_offset,
block_len_in_page, err);
goto out2;
}
offs = (long long)orig_blk_offset << orig_inode->i_blkbits;
/* Calculate data_size */
if ((orig_blk_offset + block_len_in_page - 1) ==
((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
/* Replace the last block */
tmp_data_size = orig_inode->i_size & (blocksize - 1);
/*
* If tmp_data_size is zero, the file size is a multiple of the
* blocksize, so use a whole block instead.
*/
if (tmp_data_size == 0)
tmp_data_size = blocksize;
data_size = tmp_data_size +
((block_len_in_page - 1) << orig_inode->i_blkbits);
} else
data_size = block_len_in_page << orig_inode->i_blkbits;
replaced_size = data_size;
*err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags,
&page, &fsdata);
if (unlikely(*err < 0))
goto out;
if (!PageUptodate(page)) {
mapping->a_ops->readpage(o_filp, page);
lock_page(page);
}
/*
* try_to_release_page() doesn't call releasepage in writeback mode.
* We must also preserve the ordering of writes to the same file by
* multiple move-extent processes, so call wait_on_page_writeback()
* to wait for the page's writeback to finish.
*/
wait_on_page_writeback(page);
/* Release old bh and drop refs */
try_to_release_page(page, 0);
replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
orig_blk_offset, block_len_in_page,
&err2);
if (err2) {
if (replaced_count) {
block_len_in_page = replaced_count;
replaced_size =
block_len_in_page << orig_inode->i_blkbits;
} else
goto out;
}
if (!page_has_buffers(page))
create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0);
bh = page_buffers(page);
for (i = 0; i < data_offset_in_page; i++)
bh = bh->b_this_page;
for (i = 0; i < block_len_in_page; i++) {
*err = ext4_get_block(orig_inode,
(sector_t)(orig_blk_offset + i), bh, 0);
if (*err < 0)
goto out;
if (bh->b_this_page != NULL)
bh = bh->b_this_page;
}
*err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size,
page, fsdata);
page = NULL;
out:
if (unlikely(page)) {
if (PageLocked(page))
unlock_page(page);
page_cache_release(page);
ext4_journal_stop(handle);
}
out2:
ext4_journal_stop(handle);
if (err2)
*err = err2;
return replaced_count;
}
/**
* mext_check_arguments - Check whether move extent can be done
*
* @orig_inode: original inode
* @donor_inode: donor inode
* @orig_start: logical start offset in block for orig
* @donor_start: logical start offset in block for donor
* @len: the number of blocks to be moved
*
* Check the arguments of ext4_move_extents() whether the files can be
* exchanged with each other.
* Return 0 on success, or a negative error value on failure.
*/
static int
mext_check_arguments(struct inode *orig_inode,
struct inode *donor_inode, __u64 orig_start,
__u64 donor_start, __u64 *len)
{
ext4_lblk_t orig_blocks, donor_blocks;
unsigned int blkbits = orig_inode->i_blkbits;
unsigned int blocksize = 1 << blkbits;
if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
ext4_debug("ext4 move extent: suid or sgid is set"
" to donor file [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
return -EPERM;
/* Ext4 move extent does not support swapfile */
if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
"not be swapfile [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
/* Files should be in the same ext4 FS */
if (orig_inode->i_sb != donor_inode->i_sb) {
ext4_debug("ext4 move extent: The argument files "
"should be in same FS [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
/* Ext4 move extent supports only extent based file */
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "
"based file [ino:orig %lu]\n", orig_inode->i_ino);
return -EOPNOTSUPP;
} else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: donor file is not extents "
"based file [ino:donor %lu]\n", donor_inode->i_ino);
return -EOPNOTSUPP;
}
if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
ext4_debug("ext4 move extent: File size is 0 byte\n");
return -EINVAL;
}
/* Start offset should be same */
if (orig_start != donor_start) {
ext4_debug("ext4 move extent: orig and donor's start "
"offset are not same [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
if ((orig_start >= EXT_MAX_BLOCKS) ||
(donor_start >= EXT_MAX_BLOCKS) ||
(*len > EXT_MAX_BLOCKS) ||
(orig_start + *len >= EXT_MAX_BLOCKS)) {
ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
"[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
if (orig_inode->i_size > donor_inode->i_size) {
donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits;
/* TODO: eliminate this artificial restriction */
if (orig_start >= donor_blocks) {
ext4_debug("ext4 move extent: orig start offset "
"[%llu] should be less than donor file blocks "
"[%u] [ino:orig %lu, donor %lu]\n",
orig_start, donor_blocks,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
/* TODO: eliminate this artificial restriction */
if (orig_start + *len > donor_blocks) {
ext4_debug("ext4 move extent: End offset [%llu] should "
"be less than donor file blocks [%u]."
"So adjust length from %llu to %llu "
"[ino:orig %lu, donor %lu]\n",
orig_start + *len, donor_blocks,
*len, donor_blocks - orig_start,
orig_inode->i_ino, donor_inode->i_ino);
*len = donor_blocks - orig_start;
}
} else {
orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits;
if (orig_start >= orig_blocks) {
ext4_debug("ext4 move extent: start offset [%llu] "
"should be less than original file blocks "
"[%u] [ino:orig %lu, donor %lu]\n",
orig_start, orig_blocks,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
if (orig_start + *len > orig_blocks) {
ext4_debug("ext4 move extent: Adjust length "
"from %llu to %llu. Because it should be "
"less than original file blocks "
"[ino:orig %lu, donor %lu]\n",
*len, orig_blocks - orig_start,
orig_inode->i_ino, donor_inode->i_ino);
*len = orig_blocks - orig_start;
}
}
if (!*len) {
ext4_debug("ext4 move extent: len should not be 0 "
"[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
donor_inode->i_ino);
return -EINVAL;
}
return 0;
}
/**
* mext_inode_double_lock - Lock i_mutex on both @inode1 and @inode2
*
* @inode1: the inode structure
* @inode2: the inode structure
*
* Lock two inodes' i_mutex by i_ino order.
* If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
*/
static int
mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
{
int ret = 0;
BUG_ON(inode1 == NULL && inode2 == NULL);
ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
if (ret < 0)
goto out;
if (inode1 == inode2) {
mutex_lock(&inode1->i_mutex);
goto out;
}
if (inode1->i_ino < inode2->i_ino) {
mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
} else {
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
}
out:
return ret;
}
/**
* mext_inode_double_unlock - Release i_mutex on both @inode1 and @inode2
*
* @inode1: the inode that is released first
* @inode2: the inode that is released second
*
* If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
*/
static int
mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
int ret = 0;
BUG_ON(inode1 == NULL && inode2 == NULL);
ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
if (ret < 0)
goto out;
if (inode1)
mutex_unlock(&inode1->i_mutex);
if (inode2 && inode2 != inode1)
mutex_unlock(&inode2->i_mutex);
out:
return ret;
}
/**
* ext4_move_extents - Exchange the specified range of a file
*
* @o_filp: file structure of the original file
* @d_filp: file structure of the donor file
* @orig_start: start offset in block for orig
* @donor_start: start offset in block for donor
* @len: the number of blocks to be moved
* @moved_len: moved block length
*
* This function returns 0 and moved block length is set in moved_len
* if succeed, otherwise returns error value.
*
* Note: ext4_move_extents() proceeds in the following order.
* 1:ext4_move_extents() calculates the last block number of the extents
* to be moved from the start block number (orig_start) and the number of
* blocks to be moved (len) given as arguments.
* If {orig, donor}_start points to a hole, the extent's start offset
* pointed to by ext_cur (the current extent), holecheck_path and
* orig_path are set to the first extent after the hole.
* 2:Repeat steps 3 to 5 until holecheck_path points to the last extent
* or ext_cur exceeds block_end, which is the last logical block number.
* 3:To get the length of the contiguous area, call mext_next_extent()
* on ext_cur (whose initial value comes from holecheck_path) repeatedly
* until a discontiguous extent is found, the start logical block number
* exceeds block_end, or the last extent is reached.
* 4:Exchange the original inode's data with the donor inode's data
* from orig_page_offset to seq_end_page.
* The start indexes of the data are passed as arguments.
* Both the original inode and the donor inode use orig_page_offset
* (to easily handle the blocksize != pagesize case, the offset for the
* donor inode is in block units).
* 5:Update holecheck_path and orig_path to point to the next extent,
* then return to step 2.
* 6:Release holecheck_path and orig_path, and set moved_len to the
* number of moved blocks.
* moved_len is useful for the caller to calculate the file offset at
* which to start the next move-extent ioctl.
* 7:Return 0 on success, or a negative error value on failure.
*/
int
ext4_move_extents(struct file *o_filp, struct file *d_filp,
__u64 orig_start, __u64 donor_start, __u64 len,
__u64 *moved_len)
{
struct inode *orig_inode = o_filp->f_dentry->d_inode;
struct inode *donor_inode = d_filp->f_dentry->d_inode;
struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
ext4_lblk_t block_start = orig_start;
ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
ext4_lblk_t rest_blocks;
pgoff_t orig_page_offset = 0, seq_end_page;
int ret1, ret2, depth, last_extent = 0;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
int data_offset_in_page;
int block_len_in_page;
int uninit;
/* orig and donor should be different file */
if (orig_inode->i_ino == donor_inode->i_ino) {
ext4_debug("ext4 move extent: The argument files should not "
"be same file [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
/* Regular file check */
if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
ext4_debug("ext4 move extent: The argument files should be "
"regular file [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
/* Protect orig and donor inodes against a truncate */
ret1 = mext_inode_double_lock(orig_inode, donor_inode);
if (ret1 < 0)
return ret1;
/* Protect extent tree against block allocations via delalloc */
double_down_write_data_sem(orig_inode, donor_inode);
/* Check the filesystem environment whether move_extent can be done */
ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
donor_start, &len);
if (ret1)
goto out;
file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
block_end = block_start + len - 1;
if (file_end < block_end)
len -= block_end - file_end;
ret1 = get_ext_path(orig_inode, block_start, &orig_path);
if (ret1)
goto out;
/* Get path structure to check the hole */
ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
if (ret1)
goto out;
depth = ext_depth(orig_inode);
ext_cur = holecheck_path[depth].p_ext;
/*
* Get proper starting location of block replacement if block_start was
* within the hole.
*/
if (le32_to_cpu(ext_cur->ee_block) +
ext4_ext_get_actual_len(ext_cur) - 1 < block_start) {
/*
* The hole exists between extents or at the tail of the
* original file.
*/
last_extent = mext_next_extent(orig_inode,
holecheck_path, &ext_cur);
if (last_extent < 0) {
ret1 = last_extent;
goto out;
}
last_extent = mext_next_extent(orig_inode, orig_path,
&ext_dummy);
if (last_extent < 0) {
ret1 = last_extent;
goto out;
}
seq_start = le32_to_cpu(ext_cur->ee_block);
} else if (le32_to_cpu(ext_cur->ee_block) > block_start)
/* The hole exists at the beginning of original file. */
seq_start = le32_to_cpu(ext_cur->ee_block);
else
seq_start = block_start;
/* No blocks within the specified range. */
if (le32_to_cpu(ext_cur->ee_block) > block_end) {
ext4_debug("ext4 move extent: The specified range of file "
"may be the hole\n");
ret1 = -EINVAL;
goto out;
}
/* Adjust start blocks */
add_blocks = min(le32_to_cpu(ext_cur->ee_block) +
ext4_ext_get_actual_len(ext_cur), block_end + 1) -
max(le32_to_cpu(ext_cur->ee_block), block_start);
while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) {
seq_blocks += add_blocks;
/* Adjust tail blocks */
if (seq_start + seq_blocks - 1 > block_end)
seq_blocks = block_end - seq_start + 1;
ext_prev = ext_cur;
last_extent = mext_next_extent(orig_inode, holecheck_path,
&ext_cur);
if (last_extent < 0) {
ret1 = last_extent;
break;
}
add_blocks = ext4_ext_get_actual_len(ext_cur);
/*
* Extend the length of contiguous block (seq_blocks)
* if extents are contiguous.
*/
if (ext4_can_extents_be_merged(orig_inode,
ext_prev, ext_cur) &&
block_end >= le32_to_cpu(ext_cur->ee_block) &&
!last_extent)
continue;
/* Check whether the original extent is uninitialized */
uninit = ext4_ext_is_uninitialized(ext_prev);
data_offset_in_page = seq_start % blocks_per_page;
/*
* Calculate data blocks count that should be swapped
* at the first page.
*/
if (data_offset_in_page + seq_blocks > blocks_per_page) {
/* Swapped blocks are across pages */
block_len_in_page =
blocks_per_page - data_offset_in_page;
} else {
/* Swapped blocks are in a page */
block_len_in_page = seq_blocks;
}
orig_page_offset = seq_start >>
(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
seq_end_page = (seq_start + seq_blocks - 1) >>
(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
seq_start = le32_to_cpu(ext_cur->ee_block);
rest_blocks = seq_blocks;
/*
* Release the write data semaphore to avoid the following problems:
* a. transaction deadlock among ext4_journal_start,
* ->write_begin via pagefault, and jbd2_journal_commit
* b. racing with ->readpage, ->write_begin, and ext4_get_block
* in move_extent_per_page
*/
double_up_write_data_sem(orig_inode, donor_inode);
while (orig_page_offset <= seq_end_page) {
/* Swap original branches with new branches */
block_len_in_page = move_extent_per_page(
o_filp, donor_inode,
orig_page_offset,
data_offset_in_page,
block_len_in_page, uninit,
&ret1);
/* Count how many blocks we have exchanged */
*moved_len += block_len_in_page;
if (ret1 < 0)
break;
if (*moved_len > len) {
EXT4_ERROR_INODE(orig_inode,
"We replaced blocks too much! "
"sum of replaced: %llu requested: %llu",
*moved_len, len);
ret1 = -EIO;
break;
}
orig_page_offset++;
data_offset_in_page = 0;
rest_blocks -= block_len_in_page;
if (rest_blocks > blocks_per_page)
block_len_in_page = blocks_per_page;
else
block_len_in_page = rest_blocks;
}
double_down_write_data_sem(orig_inode, donor_inode);
if (ret1 < 0)
break;
/* Decrease buffer counter */
if (holecheck_path)
ext4_ext_drop_refs(holecheck_path);
ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
if (ret1)
break;
depth = holecheck_path->p_depth;
/* Decrease buffer counter */
if (orig_path)
ext4_ext_drop_refs(orig_path);
ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
if (ret1)
break;
ext_cur = holecheck_path[depth].p_ext;
add_blocks = ext4_ext_get_actual_len(ext_cur);
seq_blocks = 0;
}
out:
if (*moved_len) {
ext4_discard_preallocations(orig_inode);
ext4_discard_preallocations(donor_inode);
}
if (orig_path) {
ext4_ext_drop_refs(orig_path);
kfree(orig_path);
}
if (holecheck_path) {
ext4_ext_drop_refs(holecheck_path);
kfree(holecheck_path);
}
double_up_write_data_sem(orig_inode, donor_inode);
ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
if (ret1)
return ret1;
else if (ret2)
return ret2;
return 0;
}
| gpl-2.0 |
flexdroid/kernel | drivers/infiniband/ulp/ipoib/ipoib_cm.c | 3593 | 43475 | /*
* Copyright (c) 2006 Mellanox Technologies. All rights reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include "ipoib.h"
int ipoib_max_conn_qp = 128;
module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
"Max number of connected-mode QPs per interface "
"(applied only if shared receive queue is not available)");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;
module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
"Enable data path debug tracing for connected mode if > 0");
#endif
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
.wr_id = IPOIB_CM_RX_DRAIN_WRID,
.opcode = IB_WR_SEND,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
u64 mapping[IPOIB_CM_RX_SG])
{
int i;
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < frags; ++i)
ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_recv_wr *bad_wr;
int i, ret;
priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
for (i = 0; i < priv->cm.num_frags; ++i)
priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
priv->cm.srq_ring[id].mapping);
dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
priv->cm.srq_ring[id].skb = NULL;
}
return ret;
}
static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
struct ipoib_cm_rx *rx,
struct ib_recv_wr *wr,
struct ib_sge *sge, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_recv_wr *bad_wr;
int i, ret;
wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
for (i = 0; i < IPOIB_CM_RX_SG; ++i)
sge[i].addr = rx->rx_ring[id].mapping[i];
ret = ib_post_recv(rx->qp, wr, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
rx->rx_ring[id].mapping);
dev_kfree_skb_any(rx->rx_ring[id].skb);
rx->rx_ring[id].skb = NULL;
}
return ret;
}
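/*
 * Allocate a connected-mode receive skb: a linear head of
 * IPOIB_CM_HEAD_SIZE bytes plus @frags page fragments, each DMA-mapped
 * for the HCA.  The skb is stored in rx_ring[id]; NULL is returned on
 * allocation or mapping failure.
 */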
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring,
int id, int frags,
u64 mapping[IPOIB_CM_RX_SG])
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int i;
skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
if (unlikely(!skb))
return NULL;
/*
* IPoIB adds a 4 byte header. So we need 12 more bytes to align the
* IP header to a multiple of 16.
*/
skb_reserve(skb, 12);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
dev_kfree_skb_any(skb);
return NULL;
}
for (i = 0; i < frags; i++) {
struct page *page = alloc_page(GFP_ATOMIC);
if (!page)
goto partial_error;
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
mapping[i + 1] = ib_dma_map_page(priv->ca, page,
0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
goto partial_error;
}
rx_ring[id].skb = skb;
return skb;
partial_error:
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
for (; i > 0; --i)
ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
return NULL;
}
static void ipoib_cm_free_rx_ring(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int i;
for (i = 0; i < ipoib_recvq_size; ++i)
if (rx_ring[i].skb) {
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
rx_ring[i].mapping);
dev_kfree_skb_any(rx_ring[i].skb);
}
vfree(rx_ring);
}
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
struct ib_send_wr *bad_wr;
struct ipoib_cm_rx *p;
/* We only reserved 1 extra slot in CQ for drain WRs, so
* make sure we have at most 1 outstanding WR. */
if (list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list))
return;
/*
* QPs on the flush list are in the error state.  This way, a "flush
* error" WC will be generated immediately for each WR we post.
*/
p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
ipoib_warn(priv, "failed to post drain wr\n");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
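/*
 * QP event handler for passive connections: when the last WQE is
 * reached, move the connection to the flush list and kick off the
 * drain of its remaining receive WRs.
 */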
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
struct ipoib_cm_rx *p = ctx;
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
unsigned long flags;
if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
return;
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_flush_list);
p->state = IPOIB_CM_RX_FLUSH;
ipoib_cm_start_rx_drain(priv);
spin_unlock_irqrestore(&priv->lock, flags);
}
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
struct ipoib_cm_rx *p)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_init_attr attr = {
.event_handler = ipoib_cm_rx_event_handler,
.send_cq = priv->recv_cq, /* For drain WR */
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = 1, /* For drain WR */
.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = p,
};
if (!ipoib_cm_has_srq(dev)) {
attr.cap.max_recv_wr = ipoib_recvq_size;
attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
}
return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp,
unsigned psn)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
qp_attr.qp_state = IB_QPS_INIT;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
return ret;
}
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
return ret;
}
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
return ret;
}
qp_attr.rq_psn = psn;
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
return ret;
}
/*
* Current Mellanox HCA firmware won't generate completions
* with error for drain WRs unless the QP has been moved to
* RTS first. This work-around leaves a window where a QP has
* moved to error asynchronously, but this will eventually get
* fixed in firmware, so let's not error out if modify QP
* fails.
*/
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
return 0;
}
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
return 0;
}
return 0;
}
static void ipoib_cm_init_rx_wr(struct net_device *dev,
struct ib_recv_wr *wr,
struct ib_sge *sge)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int i;
for (i = 0; i < priv->cm.num_frags; ++i)
sge[i].lkey = priv->mr->lkey;
sge[0].length = IPOIB_CM_HEAD_SIZE;
for (i = 1; i < priv->cm.num_frags; ++i)
sge[i].length = PAGE_SIZE;
wr->next = NULL;
wr->sg_list = sge;
wr->num_sge = priv->cm.num_frags;
}
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
struct ipoib_cm_rx *rx)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct {
struct ib_recv_wr wr;
struct ib_sge sge[IPOIB_CM_RX_SG];
} *t;
int ret;
int i;
rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
if (!rx->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
return -ENOMEM;
}
t = kmalloc(sizeof *t, GFP_KERNEL);
if (!t) {
ret = -ENOMEM;
goto err_free;
}
ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
spin_lock_irq(&priv->lock);
if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
spin_unlock_irq(&priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
ret = -EINVAL;
goto err_free;
} else
++priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
rx->rx_ring[i].mapping)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
goto err_count;
}
ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
if (ret) {
ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
"failed for buf %d\n", i);
ret = -EIO;
goto err_count;
}
}
rx->recv_count = ipoib_recvq_size;
kfree(t);
return 0;
err_count:
spin_lock_irq(&priv->lock);
--priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
err_free:
kfree(t);
ipoib_cm_free_rx_ring(dev, rx->rx_ring);
return ret;
}
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
struct ib_qp *qp, struct ib_cm_req_event_param *req,
unsigned psn)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_rep_param rep = {};
data.qpn = cpu_to_be32(priv->qp->qp_num);
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
rep.private_data = &data;
rep.private_data_len = sizeof data;
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
rep.srq = ipoib_cm_has_srq(dev);
rep.qp_num = qp->qp_num;
rep.starting_psn = psn;
return ib_send_cm_rep(cm_id, &rep);
}
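/*
 * Passive-side REQ handler: allocate a per-connection context, create
 * the RX QP and bring it up, post receive buffers when no SRQ is
 * available, and answer the request with a REP.
 */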
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
struct net_device *dev = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
unsigned psn;
int ret;
ipoib_dbg(priv, "REQ arrived\n");
p = kzalloc(sizeof *p, GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev = dev;
p->id = cm_id;
cm_id->context = p;
p->state = IPOIB_CM_RX_LIVE;
p->jiffies = jiffies;
INIT_LIST_HEAD(&p->list);
p->qp = ipoib_cm_create_rx_qp(dev, p);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
goto err_qp;
}
psn = random32() & 0xffffff;
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
if (ret)
goto err_modify;
if (!ipoib_cm_has_srq(dev)) {
ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
if (ret)
goto err_modify;
}
spin_lock_irq(&priv->lock);
queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to the passive_ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
p->jiffies = jiffies;
if (p->state == IPOIB_CM_RX_LIVE)
list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irq(&priv->lock);
ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
if (ret) {
ipoib_warn(priv, "failed to send REP: %d\n", ret);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
}
return 0;
err_modify:
ib_destroy_qp(p->qp);
err_qp:
kfree(p);
return ret;
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event)
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
p = cm_id->context;
ib_send_cm_drep(cm_id, NULL, 0);
/* Fall through */
case IB_CM_REJ_RECEIVED:
p = cm_id->context;
priv = netdev_priv(p->dev);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
/* Fall through */
default:
return 0;
}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
unsigned int length, struct sk_buff *toskb)
{
int i, num_frags;
unsigned int size;
/* put header into skb */
size = min(length, hdr_space);
skb->tail += size;
skb->len += size;
length -= size;
num_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < num_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (length == 0) {
/* don't need this page */
skb_fill_page_desc(toskb, i, skb_frag_page(frag),
0, PAGE_SIZE);
--skb_shinfo(skb)->nr_frags;
} else {
size = min(length, (unsigned) PAGE_SIZE);
skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
skb->len += size;
length -= size;
}
}
}
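/*
 * Receive completion handler for connected mode: handles drain WR
 * completions, refreshes the per-connection LRU timestamp, copies
 * small packets into a freshly allocated skb (copybreak), hands the
 * data to the network stack and reposts a receive buffer.
 */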
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx_buf *rx_ring;
unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
struct sk_buff *skb, *newskb;
struct ipoib_cm_rx *p;
unsigned long flags;
u64 mapping[IPOIB_CM_RX_SG];
int frags;
int has_srq;
struct sk_buff *small_skb;
ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_recvq_size)) {
if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
wr_id, ipoib_recvq_size);
return;
}
p = wc->qp->qp_context;
has_srq = ipoib_cm_has_srq(dev);
rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
ipoib_dbg(priv, "cm recv error "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (has_srq)
goto repost;
else {
if (!--p->recv_count) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
}
return;
}
}
if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
spin_lock_irqsave(&priv->lock, flags);
p->jiffies = jiffies;
/* Move this entry to the list head, but do not re-add it
* if it has been moved off the list. */
if (p->state == IPOIB_CM_RX_LIVE)
list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irqrestore(&priv->lock, flags);
}
}
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
small_skb = dev_alloc_skb(dlen + 12);
if (small_skb) {
skb_reserve(small_skb, 12);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_put(small_skb, dlen);
skb = small_skb;
goto copied;
}
}
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
++dev->stats.rx_dropped;
goto repost;
}
ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
skb->dev = dev;
/* XXX get correct PACKET_ type here */
skb->pkt_type = PACKET_HOST;
netif_receive_skb(skb);
repost:
if (has_srq) {
if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
"for buf %d\n", wr_id);
} else {
if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
&priv->cm.rx_wr,
priv->cm.rx_sge,
wr_id))) {
--p->recv_count;
ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
"for buf %d\n", wr_id);
}
}
}
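/* Post a single-fragment send work request on the connected-mode TX QP. */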
static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_cm_tx *tx,
unsigned int wr_id,
u64 addr, int len)
{
struct ib_send_wr *bad_wr;
priv->tx_sge[0].addr = addr;
priv->tx_sge[0].length = len;
priv->tx_wr.num_sge = 1;
priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx_buf *tx_req;
u64 addr;
int rc;
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
skb->len, tx->mtu);
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
/*
* We put the skb into the tx_ring _before_ we call post_send()
* because it's entirely possible that the completion handler will
* run before we execute anything after the post_send(). That
* means we have to make sure everything is properly recorded and
* our state is consistent before we call post_send().
*/
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
tx_req->mapping = addr;
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
addr, skb->len);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
} else {
dev->trans_start = jiffies;
++tx->tx_head;
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
}
}
}
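/*
 * Send completion handler for connected mode: unmap and free the skb,
 * wake the netdev queue if it was stopped, and tear down the
 * connection if the completion reports an error other than a flush.
 */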
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
struct ipoib_cm_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
wr_id, ipoib_sendq_size);
return;
}
tx_req = &tx->tx_ring[wr_id];
ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
/* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len;
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock(dev);
++tx->tx_tail;
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_neigh *neigh;
ipoib_dbg(priv, "failed cm send event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
if (neigh->ah)
ipoib_put_ah(neigh->ah);
ipoib_neigh_free(dev, neigh);
tx->neigh = NULL;
}
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
spin_unlock_irqrestore(&priv->lock, flags);
}
netif_tx_unlock(dev);
}
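/*
 * Create the listening CM ID used to accept incoming connected-mode
 * connections on this interface's service ID.
 */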
int ipoib_cm_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
return 0;
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
goto err_cm;
}
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
0, NULL);
if (ret) {
printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
IPOIB_CM_IETF_ID | priv->qp->qp_num);
goto err_listen;
}
return 0;
err_listen:
ib_destroy_cm_id(priv->cm.id);
err_cm:
priv->cm.id = NULL;
return ret;
}
static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *rx, *n;
LIST_HEAD(list);
spin_lock_irq(&priv->lock);
list_splice_init(&priv->cm.rx_reap_list, &list);
spin_unlock_irq(&priv->lock);
list_for_each_entry_safe(rx, n, &list, list) {
ib_destroy_cm_id(rx->id);
ib_destroy_qp(rx->qp);
if (!ipoib_cm_has_srq(dev)) {
ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
spin_lock_irq(&priv->lock);
--priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
}
kfree(rx);
}
}
void ipoib_cm_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
unsigned long begin;
int ret;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
return;
ib_destroy_cm_id(priv->cm.id);
priv->cm.id = NULL;
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
list_move(&p->list, &priv->cm.rx_error_list);
p->state = IPOIB_CM_RX_ERROR;
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
if (ret)
ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
spin_lock_irq(&priv->lock);
}
/* Wait for all RX to be drained */
begin = jiffies;
while (!list_empty(&priv->cm.rx_error_list) ||
!list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list)) {
if (time_after(jiffies, begin + 5 * HZ)) {
ipoib_warn(priv, "RX drain timing out\n");
/*
* assume the HW is wedged and just free up everything.
*/
list_splice_init(&priv->cm.rx_flush_list,
&priv->cm.rx_reap_list);
list_splice_init(&priv->cm.rx_error_list,
&priv->cm.rx_reap_list);
list_splice_init(&priv->cm.rx_drain_list,
&priv->cm.rx_reap_list);
break;
}
spin_unlock_irq(&priv->lock);
msleep(1);
ipoib_drain_cq(dev);
spin_lock_irq(&priv->lock);
}
spin_unlock_irq(&priv->lock);
ipoib_cm_free_rx_reap_list(dev);
cancel_delayed_work(&priv->cm.stale_task);
}
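/*
 * Active-side REP handler: validate the remote MTU, bring the TX QP
 * through RTR/RTS, flush any packets queued on the neighbour while the
 * connection was being set up, and acknowledge with an RTU.
 */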
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
struct ipoib_cm_tx *p = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
struct ipoib_cm_data *data = event->private_data;
struct sk_buff_head skqueue;
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
struct sk_buff *skb;
p->mtu = be32_to_cpu(data->mtu);
if (p->mtu <= IPOIB_ENCAP_LEN) {
ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
p->mtu, IPOIB_ENCAP_LEN);
return -EINVAL;
}
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
return ret;
}
qp_attr.rq_psn = 0 /* FIXME */;
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
return ret;
}
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
return ret;
}
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
return ret;
}
skb_queue_head_init(&skqueue);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
if (dev_queue_xmit(skb))
ipoib_warn(priv, "dev_queue_xmit failed "
"to requeue packet\n");
}
ret = ib_send_cm_rtu(cm_id, NULL, 0);
if (ret) {
ipoib_warn(priv, "failed to send RTU: %d\n", ret);
return ret;
}
return 0;
}
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_init_attr attr = {
.send_cq = priv->recv_cq,
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = ipoib_sendq_size,
.cap.max_send_sge = 1,
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = tx
};
return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_send_req(struct net_device *dev,
struct ib_cm_id *id, struct ib_qp *qp,
u32 qpn,
struct ib_sa_path_rec *pathrec)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_req_param req = {};
data.qpn = cpu_to_be32(priv->qp->qp_num);
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
req.primary_path = pathrec;
req.alternate_path = NULL;
req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
req.qp_num = qp->qp_num;
req.qp_type = qp->qp_type;
req.private_data = &data;
req.private_data_len = sizeof data;
req.flow_control = 0;
req.starting_psn = 0; /* FIXME */
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
req.responder_resources = 4;
req.remote_cm_response_timeout = 20;
req.local_cm_response_timeout = 20;
req.retry_count = 0; /* RFC draft warns against retries */
req.rnr_retry_count = 0; /* RFC draft warns against retries */
req.max_cm_retries = 15;
req.srq = ipoib_cm_has_srq(dev);
return ib_send_cm_req(id, &req);
}
static int ipoib_cm_modify_tx_init(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
if (ret) {
ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
return ret;
}
qp_attr.qp_state = IB_QPS_INIT;
qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr.port_num = priv->port;
qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
return ret;
}
return 0;
}
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct ib_sa_path_rec *pathrec)
{
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
int ret;
p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
if (!p->tx_ring) {
ipoib_warn(priv, "failed to allocate tx ring\n");
ret = -ENOMEM;
goto err_tx;
}
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
goto err_qp;
}
p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
if (IS_ERR(p->id)) {
ret = PTR_ERR(p->id);
ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
goto err_id;
}
ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
if (ret) {
ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
goto err_modify;
}
ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
if (ret) {
ipoib_warn(priv, "failed to send cm req: %d\n", ret);
goto err_send_cm;
}
ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
p->qp->qp_num, pathrec->dgid.raw, qpn);
return 0;
err_send_cm:
err_modify:
ib_destroy_cm_id(p->id);
err_id:
p->id = NULL;
ib_destroy_qp(p->qp);
err_qp:
p->qp = NULL;
vfree(p->tx_ring);
err_tx:
return ret;
}
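/*
 * Tear down a connected-mode TX: destroy the CM ID, wait (with a
 * timeout) for outstanding sends to complete, then unmap and free any
 * remaining skbs, destroy the QP and free the ring.
 */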
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
struct ipoib_cm_tx_buf *tx_req;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
if (p->id)
ib_destroy_cm_id(p->id);
if (p->tx_ring) {
/* Wait for all sends to complete */
begin = jiffies;
while ((int) p->tx_tail - (int) p->tx_head < 0) {
if (time_after(jiffies, begin + 5 * HZ)) {
ipoib_warn(priv, "timing out; %d sends not completed\n",
p->tx_head - p->tx_tail);
goto timeout;
}
msleep(1);
}
}
timeout:
while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb);
++p->tx_tail;
netif_tx_lock_bh(p->dev);
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
netif_tx_unlock_bh(p->dev);
}
if (p->qp)
ib_destroy_qp(p->qp);
vfree(p->tx_ring);
kfree(p);
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event)
{
struct ipoib_cm_tx *tx = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
unsigned long flags;
int ret;
switch (event->event) {
case IB_CM_DREQ_RECEIVED:
ipoib_dbg(priv, "DREQ received.\n");
ib_send_cm_drep(cm_id, NULL, 0);
break;
case IB_CM_REP_RECEIVED:
ipoib_dbg(priv, "REP received.\n");
ret = ipoib_cm_rep_handler(cm_id, event);
if (ret)
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
break;
case IB_CM_REQ_ERROR:
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
if (neigh->ah)
ipoib_put_ah(neigh->ah);
ipoib_neigh_free(dev, neigh);
tx->neigh = NULL;
}
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
break;
default:
break;
}
return 0;
}
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
struct ipoib_neigh *neigh)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx;
tx = kzalloc(sizeof *tx, GFP_ATOMIC);
if (!tx)
return NULL;
neigh->cm = tx;
tx->neigh = neigh;
tx->path = path;
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
queue_work(ipoib_workqueue, &priv->cm.start_task);
return tx;
}
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(ipoib_workqueue, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->dgid.raw);
tx->neigh = NULL;
}
}
static void ipoib_cm_tx_start(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.start_task);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
struct ipoib_cm_tx *p;
unsigned long flags;
int ret;
struct ib_sa_path_rec pathrec;
u32 qpn;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.start_list)) {
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
qpn = IPOIB_QPN(neigh->neighbour->ha);
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ret = ipoib_cm_tx_init(p, qpn, &pathrec);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
if (ret) {
neigh = p->neigh;
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
if (neigh->ah)
ipoib_put_ah(neigh->ah);
ipoib_neigh_free(dev, neigh);
}
list_del(&p->list);
kfree(p);
}
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.reap_task);
struct net_device *dev = priv->dev;
struct ipoib_cm_tx *p;
unsigned long flags;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.skb_task);
struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned long flags;
unsigned mtu = priv->mcast_mtu;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
dev_kfree_skb_any(skb);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
}
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
unsigned int mtu)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
if (skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
cm.rx_reap_task)->dev);
}
static void ipoib_cm_stale_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.stale_task.work);
struct ipoib_cm_rx *p;
int ret;
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
/* List is sorted by LRU, start from tail,
* stop when we see a recently used entry */
p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
break;
list_move(&p->list, &priv->cm.rx_error_list);
p->state = IPOIB_CM_RX_ERROR;
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
if (ret)
ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
spin_lock_irq(&priv->lock);
}
if (!list_empty(&priv->cm.passive_ids))
queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
return sprintf(buf, "connected\n");
else
return sprintf(buf, "datagram\n");
}
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (!rtnl_trylock())
return restart_syscall();
/* flush paths if we switch modes so that connections are restarted */
if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
netdev_update_features(dev);
rtnl_unlock();
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
ipoib_flush_paths(dev);
return count;
}
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
netdev_update_features(dev);
dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
rtnl_unlock();
ipoib_flush_paths(dev);
return count;
}
rtnl_unlock();
return -EINVAL;
}
static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
return device_create_file(&dev->dev, &dev_attr_mode);
}
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_srq_init_attr srq_init_attr = {
.srq_type = IB_SRQT_BASIC,
.attr = {
.max_wr = ipoib_recvq_size,
.max_sge = max_sge
}
};
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -ENOSYS)
printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
}
priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
return;
}
}
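/*
 * Per-device connected-mode setup: initialise the CM lists and work
 * items, size the receive scatter list from the device's SRQ limits,
 * create the SRQ when supported and pre-post its receive buffers.
 */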
int ipoib_cm_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int i, ret;
struct ib_device_attr attr;
INIT_LIST_HEAD(&priv->cm.passive_ids);
INIT_LIST_HEAD(&priv->cm.reap_list);
INIT_LIST_HEAD(&priv->cm.start_list);
INIT_LIST_HEAD(&priv->cm.rx_error_list);
INIT_LIST_HEAD(&priv->cm.rx_flush_list);
INIT_LIST_HEAD(&priv->cm.rx_drain_list);
INIT_LIST_HEAD(&priv->cm.rx_reap_list);
INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
skb_queue_head_init(&priv->cm.skb_queue);
ret = ib_query_device(priv->ca, &attr);
if (ret) {
printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
return ret;
}
ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);
attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
ipoib_cm_create_srq(dev, attr.max_srq_sge);
if (ipoib_cm_has_srq(dev)) {
priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
priv->cm.num_frags = attr.max_srq_sge;
ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
priv->cm.max_cm_mtu, priv->cm.num_frags);
} else {
priv->cm.max_cm_mtu = IPOIB_CM_MTU;
priv->cm.num_frags = IPOIB_CM_RX_SG;
}
ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
if (ipoib_cm_has_srq(dev)) {
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
priv->cm.num_frags - 1,
priv->cm.srq_ring[i].mapping)) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
return -ENOMEM;
}
if (ipoib_cm_post_receive_srq(dev, i)) {
ipoib_warn(priv, "ipoib_cm_post_receive_srq "
"failed for buf %d\n", i);
ipoib_cm_dev_cleanup(dev);
return -EIO;
}
}
}
priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
return 0;
}
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
if (!priv->cm.srq)
return;
ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
ret = ib_destroy_srq(priv->cm.srq);
if (ret)
ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
priv->cm.srq = NULL;
if (!priv->cm.srq_ring)
return;
ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
priv->cm.srq_ring = NULL;
}
| gpl-2.0 |
buck101/superlinux | arch/um/kernel/exec.c | 4361 | 1201 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <mem_user.h>
#include <skas.h>
#include <os.h>
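/*
 * Called on exec: tear down the old address space (everything except
 * the stub pages) and load a safe register set for the new image.
 */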
void flush_thread(void)
{
void *data = NULL;
int ret;
arch_flush_thread(¤t->thread.arch);
ret = unmap(¤t->mm->context.id, 0, STUB_START, 0, &data);
ret = ret || unmap(¤t->mm->context.id, STUB_END,
host_task_size - STUB_END, 1, &data);
if (ret) {
printk(KERN_ERR "flush_thread - clearing address space failed, "
"err = %d\n", ret);
force_sig(SIGKILL, current);
}
get_safe_registers(current_pt_regs()->regs.gp,
current_pt_regs()->regs.fp);
__switch_mm(¤t->mm->context.id);
}
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
{
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
current->ptrace &= ~PT_DTRACE;
#ifdef SUBARCH_EXECVE1
SUBARCH_EXECVE1(regs->regs);
#endif
}
EXPORT_SYMBOL(start_thread);
| gpl-2.0 |
wan5xp/android_kernel_sony_u8500 | arch/ia64/kvm/vmm.c | 8457 | 2352 | /*
* vmm.c: vmm module interface with kvm module
*
* Copyright (c) 2007, Intel Corporation.
*
* Xiantao Zhang (xiantao.zhang@intel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#include<linux/kernel.h>
#include<linux/module.h>
#include<asm/fpswa.h>
#include "vcpu.h"
MODULE_AUTHOR("Intel");
MODULE_LICENSE("GPL");
extern char kvm_ia64_ivt;
extern char kvm_asm_mov_from_ar;
extern char kvm_asm_mov_from_ar_sn2;
extern fpswa_interface_t *vmm_fpswa_interface;
long vmm_sanity = 1;
struct kvm_vmm_info vmm_info = {
.module = THIS_MODULE,
.vmm_entry = vmm_entry,
.tramp_entry = vmm_trampoline,
.vmm_ivt = (unsigned long)&kvm_ia64_ivt,
.patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar,
.patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2,
};
static int __init kvm_vmm_init(void)
{
vmm_fpswa_interface = fpswa_interface;
/*Register vmm data to kvm side*/
return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
}
static void __exit kvm_vmm_exit(void)
{
kvm_exit();
return ;
}
void vmm_spin_lock(vmm_spinlock_t *lock)
{
_vmm_raw_spin_lock(lock);
}
void vmm_spin_unlock(vmm_spinlock_t *lock)
{
_vmm_raw_spin_unlock(lock);
}
static void vcpu_debug_exit(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p = &vcpu->arch.exit_data;
long psr;
local_irq_save(psr);
p->exit_reason = EXIT_REASON_DEBUG;
vmm_transition(vcpu);
local_irq_restore(psr);
}
asmlinkage int printk(const char *fmt, ...)
{
struct kvm_vcpu *vcpu = current_vcpu;
va_list args;
int r;
memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN);
va_start(args, fmt);
r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args);
va_end(args);
vcpu_debug_exit(vcpu);
return r;
}
module_init(kvm_vmm_init)
module_exit(kvm_vmm_exit)
| gpl-2.0 |
DingSoung/linux-3.0.1 | drivers/scsi/NCR_Q720.c | 9225 | 9384 | /* -*- mode: c; c-basic-offset: 8 -*- */
/* NCR Quad 720 MCA SCSI Driver
*
* Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
*/
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mca.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "ncr53c8xx.h"
#include "NCR_Q720.h"
static struct ncr_chip q720_chip __initdata = {
.revision_id = 0x0f,
.burst_max = 3,
.offset_max = 8,
.nr_divisor = 4,
.features = FE_WIDE | FE_DIFF | FE_VARCLK,
};
MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver");
MODULE_LICENSE("GPL");
#define NCR_Q720_VERSION "0.9"
/* We need this helper because we have up to four hosts per struct device */
struct NCR_Q720_private {
struct device *dev;
void __iomem * mem_base;
__u32 phys_mem_base;
__u32 mem_size;
__u8 irq;
__u8 siops;
__u8 irq_enable;
struct Scsi_Host *hosts[4];
};
static struct scsi_host_template NCR_Q720_tpnt = {
.module = THIS_MODULE,
.proc_name = "NCR_Q720",
};
static irqreturn_t
NCR_Q720_intr(int irq, void *data)
{
struct NCR_Q720_private *p = (struct NCR_Q720_private *)data;
__u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4;
__u8 siop;
sir |= ~p->irq_enable;
if(sir == 0xff)
return IRQ_NONE;
while((siop = ffz(sir)) < p->siops) {
sir |= 1<<siop;
ncr53c8xx_intr(irq, p->hosts[siop]);
}
return IRQ_HANDLED;
}
static int __init
NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop,
int irq, int slot, __u32 paddr, void __iomem *vaddr)
{
struct ncr_device device;
__u8 scsi_id;
static int unit = 0;
__u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
__u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20;
__u8 version;
int error;
scsi_id = scsr1 >> 4;
/* enable burst length 16 (FIXME: should allow this) */
scsr1 |= 0x02;
/* force a siop reset */
scsr1 |= 0x04;
writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
udelay(10);
version = readb(vaddr + 0x18) >> 4;
memset(&device, 0, sizeof(struct ncr_device));
/* Initialise ncr_device structure with items required by ncr_attach. */
device.chip = q720_chip;
device.chip.revision_id = version;
device.host_id = scsi_id;
device.dev = p->dev;
device.slot.base = paddr;
device.slot.base_c = paddr;
device.slot.base_v = vaddr;
device.slot.irq = irq;
device.differential = differential ? 2 : 0;
printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop,
(unsigned long)paddr, differential, version);
p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device);
if (!p->hosts[siop])
goto fail;
p->irq_enable |= (1<<siop);
scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
/* clear the disable interrupt bit */
scsr1 &= ~0x01;
writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
error = scsi_add_host(p->hosts[siop], p->dev);
if (error)
ncr53c8xx_release(p->hosts[siop]);
else
scsi_scan_host(p->hosts[siop]);
return error;
fail:
return -ENODEV;
}
/* Detect a Q720 card. Note, because of the setup --- the chips are
* essentially connectecd to the MCA bus independently, it is easier
* to set them up as two separate host adapters, rather than one
* adapter with two channels */
static int __init
NCR_Q720_probe(struct device *dev)
{
struct NCR_Q720_private *p;
static int banner = 1;
struct mca_device *mca_dev = to_mca_device(dev);
int slot = mca_dev->slot;
int found = 0;
int irq, i, siops;
__u8 pos2, pos4, asr2, asr9, asr10;
__u16 io_base;
__u32 base_addr, mem_size;
void __iomem *mem_base;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
pos2 = mca_device_read_pos(mca_dev, 2);
/* enable device */
pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
mca_device_write_pos(mca_dev, 2, pos2);
io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT;
if(banner) {
printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n"
"NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n"
"NCR Q720:\n");
banner = 0;
}
io_base = mca_device_transform_ioport(mca_dev, io_base);
/* OK, this is phase one of the bootstrap, we now know the
* I/O space base address. All the configuration registers
* are mapped here (including pos) */
/* sanity check I/O mapping */
i = inb(io_base) | (inb(io_base+1)<<8);
if(i != NCR_Q720_MCA_ID) {
printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i);
kfree(p);
return -ENODEV;
}
/* Phase II, find the ram base and memory map the board register */
pos4 = inb(io_base + 4);
/* enable streaming data */
pos4 |= 0x01;
outb(pos4, io_base + 4);
base_addr = (pos4 & 0x7e) << 20;
base_addr += (pos4 & 0x80) << 23;
asr10 = inb(io_base + 0x12);
base_addr += (asr10 & 0x80) << 24;
base_addr += (asr10 & 0x70) << 23;
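/*
 * How the SRAM base address is assembled (derived from the shifts above,
 * not from a datasheet): pos4 bits 1-6 become address bits 21-26, pos4
 * bit 7 becomes bit 30, asr10 bits 4-6 become bits 27-29 and asr10 bit 7
 * becomes bit 31.
 */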
/* OK, got the base addr, now we need to find the ram size,
* enable and map it */
asr9 = inb(io_base + 0x11);
i = (asr9 & 0xc0) >> 6;
if(i == 0)
mem_size = 1024;
else
mem_size = 1 << (19 + i);
/* enable the sram mapping */
asr9 |= 0x20;
/* disable the rom mapping */
asr9 &= ~0x10;
outb(asr9, io_base + 0x11);
if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) {
printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx\n-0x%lx",
(unsigned long)base_addr,
(unsigned long)(base_addr + mem_size));
goto out_free;
}
if (dma_declare_coherent_memory(dev, base_addr, base_addr,
mem_size, DMA_MEMORY_MAP)
!= DMA_MEMORY_MAP) {
printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
goto out_release_region;
}
/* The first 1k of the memory buffer is a memory map of the registers
*/
mem_base = dma_mark_declared_memory_occupied(dev, base_addr,
1024);
if (IS_ERR(mem_base)) {
printk("NCR_Q720 failed to reserve memory mapped region\n");
goto out_release;
}
/* now also enable accesses in asr 2 */
asr2 = inb(io_base + 0x0a);
asr2 |= 0x01;
outb(asr2, io_base + 0x0a);
/* get the number of SIOPs (this should be 2 or 4) */
siops = ((asr2 & 0xe0) >> 5) + 1;
/* sanity check mapping (again) */
i = readw(mem_base);
if(i != NCR_Q720_MCA_ID) {
printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i);
goto out_release;
}
irq = readb(mem_base + 5) & 0x0f;
/* now do the bus related transforms */
irq = mca_device_transform_irq(mca_dev, irq);
printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops);
printk(KERN_NOTICE "NCR Q720: On board ram %dk\n", mem_size/1024);
p->dev = dev;
p->mem_base = mem_base;
p->phys_mem_base = base_addr;
p->mem_size = mem_size;
p->irq = irq;
p->siops = siops;
if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
goto out_release;
}
/* disable all the siop interrupts */
for(i = 0; i < siops; i++) {
void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
+ i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1;
__u8 scsr1 = readb(reg_scsr1);
scsr1 |= 0x01;
writeb(scsr1, reg_scsr1);
}
/* plumb in all 720 chips */
for (i = 0; i < siops; i++) {
void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
+ i*NCR_Q720_SIOP_SHIFT;
__u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET
+ i*NCR_Q720_SIOP_SHIFT;
__u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET
+ i*NCR_Q720_SIOP_SHIFT;
int err;
outb(0xff, port + 0x40);
outb(0x07, port + 0x41);
if ((err = NCR_Q720_probe_one(p, i, irq, slot,
siop_p_base, siop_v_base)) != 0)
printk("Q720: SIOP%d: probe failed, error = %d\n",
i, err);
else
found++;
}
if (!found) {
kfree(p);
return -ENODEV;
}
mca_device_set_claim(mca_dev, 1);
mca_device_set_name(mca_dev, "NCR_Q720");
dev_set_drvdata(dev, p);
return 0;
out_release:
dma_release_declared_memory(dev);
out_release_region:
release_mem_region(base_addr, mem_size);
out_free:
kfree(p);
return -ENODEV;
}
static void __exit
NCR_Q720_remove_one(struct Scsi_Host *host)
{
scsi_remove_host(host);
ncr53c8xx_release(host);
}
static int __exit
NCR_Q720_remove(struct device *dev)
{
struct NCR_Q720_private *p = dev_get_drvdata(dev);
int i;
for (i = 0; i < p->siops; i++)
if(p->hosts[i])
NCR_Q720_remove_one(p->hosts[i]);
dma_release_declared_memory(dev);
release_mem_region(p->phys_mem_base, p->mem_size);
free_irq(p->irq, p);
kfree(p);
return 0;
}
static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
static struct mca_driver NCR_Q720_driver = {
.id_table = NCR_Q720_id_table,
.driver = {
.name = "NCR_Q720",
.bus = &mca_bus_type,
.probe = NCR_Q720_probe,
.remove = __devexit_p(NCR_Q720_remove),
},
};
static int __init
NCR_Q720_init(void)
{
int ret = ncr53c8xx_init();
if (!ret)
ret = mca_register_driver(&NCR_Q720_driver);
if (ret)
ncr53c8xx_exit();
return ret;
}
static void __exit
NCR_Q720_exit(void)
{
mca_unregister_driver(&NCR_Q720_driver);
ncr53c8xx_exit();
}
module_init(NCR_Q720_init);
module_exit(NCR_Q720_exit);
| gpl-2.0 |
MattCrystal/clucking-goose | drivers/scsi/NCR_D700.c | 9225 | 10648 | /* -*- mode: c; c-basic-offset: 8 -*- */
/* NCR Dual 700 MCA SCSI Driver
*
* Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
/* Notes:
*
* Most of the work is done in the chip specific module, 53c700.o
*
* TODO List:
*
* 1. Extract the SCSI ID from the voyager CMOS table (necessary to
 * support multi-host environments).
*
* */
/* CHANGELOG
*
* Version 2.2
*
* Added mca_set_adapter_name().
*
* Version 2.1
*
* Modularise the driver into a Board piece (this file) and a chip
* piece 53c700.[ch] and 53c700.scr, added module options. You can
* now specify the scsi id by the parameters
*
* NCR_D700=slot:<n> [siop:<n>] id:<n> ....
*
* They need to be comma separated if compiled into the kernel
*
* Version 2.0
*
* Initial implementation of TCQ (Tag Command Queueing). TCQ is full
* featured and uses the clock algorithm to keep track of outstanding
* tags and guard against individual tag starvation. Also fixed a bug
* in all of the 1.x versions where the D700_data_residue() function
* was returning results off by 32 bytes (and thus causing the same 32
* bytes to be written twice corrupting the data block). It turns out
 * the 53c700 only has 6 bit DBC and DFIFO registers, not 7 bit ones
* like the 53c710 (The 710 is the only data manual still available,
* which I'd been using to program the 700).
*
* Version 1.2
*
* Much improved message handling engine
*
* Version 1.1
*
* Add code to handle selection reasonably correctly. By the time we
* get the selection interrupt, we've already responded, but drop off the
* bus and hope the selector will go away.
*
* Version 1.0:
*
* Initial release. Fully functional except for procfs and tag
* command queueing. Has only been tested on cards with 53c700-66
* chips and only single ended. Features are
*
* 1. Synchronous data transfers to offset 8 (limit of 700-66) and
* 100ns (10MHz) limit of SCSI-2
*
* 2. Disconnection and reselection
*
* Testing:
*
* I've only really tested this with the 700-66 chip, but have done
* soak tests in multi-device environments to verify that
* disconnections and reselections are being processed correctly.
* */
#define NCR_D700_VERSION "2.2"
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mca.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
#include "53c700.h"
#include "NCR_D700.h"
static char *NCR_D700; /* command line from insmod */
MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
MODULE_LICENSE("GPL");
module_param(NCR_D700, charp, 0);
static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
{ [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
static int __init
param_setup(char *string)
{
char *pos = string, *next;
int slot = -1, siop = -1;
while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
int val = (int)simple_strtoul(++next, NULL, 0);
if(!strncmp(pos, "slot:", 5))
slot = val;
else if(!strncmp(pos, "siop:", 5))
siop = val;
else if(!strncmp(pos, "id:", 3)) {
if(slot == -1) {
printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n");
} else if(slot > MCA_MAX_SLOT_NR) {
printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val);
} else {
if(siop != 0 && siop != 1) {
id_array[slot*2] = val;
id_array[slot*2 + 1] =val;
} else {
id_array[slot*2 + siop] = val;
}
}
}
if((pos = strchr(pos, ARG_SEP)) != NULL)
pos++;
}
return 1;
}
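/*
 * Illustrative use of the syntax parsed above (slot and id values are
 * made up):
 *
 *   modprobe NCR_D700 NCR_D700="slot:3 id:6"
 *
 * or NCR_D700=slot:3,id:6 on the kernel command line when built in, sets
 * both SIOPs in MCA slot 3 to SCSI id 6; adding "siop:1" before "id:"
 * limits the id to the second channel only.
 */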
/* Host template. The 53c700 routine NCR_700_detect will
* fill in all of the missing routines */
static struct scsi_host_template NCR_D700_driver_template = {
.module = THIS_MODULE,
.name = "NCR Dual 700 MCA",
.proc_name = "NCR_D700",
.this_id = 7,
};
/* We need this helper because we have two hosts per struct device */
struct NCR_D700_private {
struct device *dev;
struct Scsi_Host *hosts[2];
char name[30];
char pad;
};
static int __devinit
NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
int slot, u32 region, int differential)
{
struct NCR_700_Host_Parameters *hostdata;
struct Scsi_Host *host;
int ret;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
if (!hostdata) {
printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
"data, detatching\n", siop);
return -ENOMEM;
}
if (!request_region(region, 64, "NCR_D700")) {
printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
region);
ret = -ENODEV;
goto region_failed;
}
/* Fill in the three required pieces of hostdata */
hostdata->base = ioport_map(region, 64);
hostdata->differential = (((1<<siop) & differential) != 0);
hostdata->clock = NCR_D700_CLOCK_MHZ;
hostdata->burst_length = 8;
/* and register the siop */
host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
if (!host) {
ret = -ENOMEM;
goto detect_failed;
}
p->hosts[siop] = host;
/* FIXME: read this from SUS */
host->this_id = id_array[slot * 2 + siop];
host->irq = irq;
host->base = region;
scsi_scan_host(host);
return 0;
detect_failed:
release_region(region, 64);
region_failed:
kfree(hostdata);
return ret;
}
static irqreturn_t
NCR_D700_intr(int irq, void *data)
{
struct NCR_D700_private *p = (struct NCR_D700_private *)data;
int i, found = 0;
for (i = 0; i < 2; i++)
if (p->hosts[i] &&
NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED)
found++;
return found ? IRQ_HANDLED : IRQ_NONE;
}
/* Detect a D700 card. Note, because of the setup --- the chips are
 * essentially connected to the MCA bus independently, it is easier
* to set them up as two separate host adapters, rather than one
* adapter with two channels */
static int __devinit
NCR_D700_probe(struct device *dev)
{
struct NCR_D700_private *p;
int differential;
static int banner = 1;
struct mca_device *mca_dev = to_mca_device(dev);
int slot = mca_dev->slot;
int found = 0;
int irq, i;
int pos3j, pos3k, pos3a, pos3b, pos4;
__u32 base_addr, offset_addr;
/* enable board interrupt */
pos4 = mca_device_read_pos(mca_dev, 4);
pos4 |= 0x4;
mca_device_write_pos(mca_dev, 4, pos4);
mca_device_write_pos(mca_dev, 6, 9);
pos3j = mca_device_read_pos(mca_dev, 3);
mca_device_write_pos(mca_dev, 6, 10);
pos3k = mca_device_read_pos(mca_dev, 3);
mca_device_write_pos(mca_dev, 6, 0);
pos3a = mca_device_read_pos(mca_dev, 3);
mca_device_write_pos(mca_dev, 6, 1);
pos3b = mca_device_read_pos(mca_dev, 3);
base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0;
offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70;
irq = (pos4 & 0x3) + 11;
if(irq >= 13)
irq++;
if(banner) {
printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n"
"NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n"
"NCR D700:\n");
banner = 0;
}
/* now do the bus related transforms */
irq = mca_device_transform_irq(mca_dev, irq);
base_addr = mca_device_transform_ioport(mca_dev, base_addr);
offset_addr = mca_device_transform_ioport(mca_dev, offset_addr);
printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr);
/*outb(BOARD_RESET, base_addr);*/
/* clear any pending interrupts */
(void)inb(base_addr + 0x08);
/* get modctl, used later for setting diff bits */
switch(differential = (inb(base_addr + 0x08) >> 6)) {
case 0x00:
/* only SIOP1 differential */
differential = 0x02;
break;
case 0x01:
/* Both SIOPs differential */
differential = 0x03;
break;
case 0x03:
/* No SIOPs differential */
differential = 0x00;
break;
default:
printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n",
differential);
differential = 0x00;
break;
}
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev = dev;
snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
printk(KERN_ERR "D700: request_irq failed\n");
kfree(p);
return -EBUSY;
}
/* plumb in both 700 chips */
for (i = 0; i < 2; i++) {
int err;
if ((err = NCR_D700_probe_one(p, i, irq, slot,
offset_addr + (0x80 * i),
differential)) != 0)
printk("D700: SIOP%d: probe failed, error = %d\n",
i, err);
else
found++;
}
if (!found) {
kfree(p);
return -ENODEV;
}
mca_device_set_claim(mca_dev, 1);
mca_device_set_name(mca_dev, "NCR_D700");
dev_set_drvdata(dev, p);
return 0;
}
static void __devexit
NCR_D700_remove_one(struct Scsi_Host *host)
{
scsi_remove_host(host);
NCR_700_release(host);
kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]);
free_irq(host->irq, host);
release_region(host->base, 64);
}
static int __devexit
NCR_D700_remove(struct device *dev)
{
struct NCR_D700_private *p = dev_get_drvdata(dev);
int i;
for (i = 0; i < 2; i++)
NCR_D700_remove_one(p->hosts[i]);
kfree(p);
return 0;
}
static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 };
static struct mca_driver NCR_D700_driver = {
.id_table = NCR_D700_id_table,
.driver = {
.name = "NCR_D700",
.bus = &mca_bus_type,
.probe = NCR_D700_probe,
.remove = __devexit_p(NCR_D700_remove),
},
};
static int __init NCR_D700_init(void)
{
#ifdef MODULE
if (NCR_D700)
param_setup(NCR_D700);
#endif
return mca_register_driver(&NCR_D700_driver);
}
static void __exit NCR_D700_exit(void)
{
mca_unregister_driver(&NCR_D700_driver);
}
module_init(NCR_D700_init);
module_exit(NCR_D700_exit);
__setup("NCR_D700=", param_setup);
| gpl-2.0 |
budi79/deka-kernel-msm7x30-3.0 | drivers/uwb/drp-ie.c | 11529 | 9767 | /*
* UWB DRP IE management.
*
* Copyright (C) 2005-2006 Intel Corporation
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uwb.h>
#include "uwb-internal.h"
/*
 * Return the reason code for a reservation's DRP IE.
*/
int uwb_rsv_reason_code(struct uwb_rsv *rsv)
{
static const int reason_codes[] = {
[UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED,
[UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED,
[UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED,
[UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT,
[UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING,
[UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED,
[UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED,
};
return reason_codes[rsv->state];
}
/*
 * Return the reason code for a reservation's companion DRP IE.
*/
int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
{
static const int companion_reason_codes[] = {
[UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED,
};
return companion_reason_codes[rsv->state];
}
/*
 * Return the status bit for a reservation's DRP IE.
*/
int uwb_rsv_status(struct uwb_rsv *rsv)
{
static const int statuses[] = {
[UWB_RSV_STATE_O_INITIATED] = 0,
[UWB_RSV_STATE_O_PENDING] = 0,
[UWB_RSV_STATE_O_MODIFIED] = 1,
[UWB_RSV_STATE_O_ESTABLISHED] = 1,
[UWB_RSV_STATE_O_TO_BE_MOVED] = 0,
[UWB_RSV_STATE_O_MOVE_COMBINING] = 1,
[UWB_RSV_STATE_O_MOVE_REDUCING] = 1,
[UWB_RSV_STATE_O_MOVE_EXPANDING] = 1,
[UWB_RSV_STATE_T_ACCEPTED] = 1,
[UWB_RSV_STATE_T_CONFLICT] = 0,
[UWB_RSV_STATE_T_PENDING] = 0,
[UWB_RSV_STATE_T_DENIED] = 0,
[UWB_RSV_STATE_T_RESIZED] = 1,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = 1,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = 1,
};
return statuses[rsv->state];
}
/*
 * Return the status bit for a reservation's companion DRP IE.
*/
int uwb_rsv_companion_status(struct uwb_rsv *rsv)
{
static const int companion_statuses[] = {
[UWB_RSV_STATE_O_MOVE_EXPANDING] = 0,
[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
[UWB_RSV_STATE_T_EXPANDING_PENDING] = 0,
[UWB_RSV_STATE_T_EXPANDING_DENIED] = 0,
};
return companion_statuses[rsv->state];
}
/*
* Allocate a DRP IE.
*
* To save having to free/allocate a DRP IE when its MAS changes,
 * enough memory is allocated for the maximum number of DRP
* allocation fields. This gives an overhead per reservation of up to
* (UWB_NUM_ZONES - 1) * 4 = 60 octets.
*/
static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
struct uwb_ie_drp *drp_ie;
drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
GFP_KERNEL);
if (drp_ie) {
drp_ie->hdr.element_id = UWB_IE_DRP;
}
return drp_ie;
}
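/*
 * Sizing sketch for the allocation above (assuming UWB_NUM_ZONES == 16
 * and a 4-octet struct uwb_drp_alloc): the buffer always reserves
 * 16 * 4 = 64 octets of allocation fields, so a reservation that ends up
 * using a single field carries up to 15 * 4 = 60 octets of unused space,
 * which is the overhead quoted in the comment before uwb_drp_ie_alloc().
 */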
/*
* Fill a DRP IE's allocation fields from a MAS bitmap.
*/
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
struct uwb_mas_bm *mas)
{
int z, i, num_fields = 0, next = 0;
struct uwb_drp_alloc *zones;
__le16 current_bmp;
DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);
zones = drp_ie->allocs;
bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);
/* Determine unique MAS bitmaps in zones from bitmap. */
for (z = 0; z < UWB_NUM_ZONES; z++) {
bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
bool found = false;
current_bmp = (__le16) *tmp_mas_bm;
for (i = 0; i < next; i++) {
if (current_bmp == zones[i].mas_bm) {
zones[i].zone_bm |= 1 << z;
found = true;
break;
}
}
if (!found) {
num_fields++;
zones[next].zone_bm = 1 << z;
zones[next].mas_bm = current_bmp;
next++;
}
}
bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
}
/* Store in format ready for transmission (le16). */
for (i = 0; i < num_fields; i++) {
drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
}
drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
+ num_fields * sizeof(struct uwb_drp_alloc);
}
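/*
 * Worked example for the deduplication above (hypothetical values): if
 * zones 0 and 3 both contain the MAS pattern 0x00f0 and no other zone is
 * used, a single allocation field is emitted with zone_bm == 0x0009 (bits
 * 0 and 3 set) and mas_bm == 0x00f0, rather than one field per zone.
 */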
/**
* uwb_drp_ie_update - update a reservation's DRP IE
* @rsv: the reservation
*/
int uwb_drp_ie_update(struct uwb_rsv *rsv)
{
struct uwb_ie_drp *drp_ie;
struct uwb_rsv_move *mv;
int unsafe;
if (rsv->state == UWB_RSV_STATE_NONE) {
kfree(rsv->drp_ie);
rsv->drp_ie = NULL;
return 0;
}
unsafe = rsv->mas.unsafe ? 1 : 0;
if (rsv->drp_ie == NULL) {
rsv->drp_ie = uwb_drp_ie_alloc();
if (rsv->drp_ie == NULL)
return -ENOMEM;
}
drp_ie = rsv->drp_ie;
uwb_ie_drp_set_unsafe(drp_ie, unsafe);
uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
uwb_ie_drp_set_type(drp_ie, rsv->type);
if (uwb_rsv_is_owner(rsv)) {
switch (rsv->target.type) {
case UWB_RSV_TARGET_DEV:
drp_ie->dev_addr = rsv->target.dev->dev_addr;
break;
case UWB_RSV_TARGET_DEVADDR:
drp_ie->dev_addr = rsv->target.devaddr;
break;
}
} else
drp_ie->dev_addr = rsv->owner->dev_addr;
uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
if (uwb_rsv_has_two_drp_ies(rsv)) {
mv = &rsv->mv;
if (mv->companion_drp_ie == NULL) {
mv->companion_drp_ie = uwb_drp_ie_alloc();
if (mv->companion_drp_ie == NULL)
return -ENOMEM;
}
drp_ie = mv->companion_drp_ie;
/* keep all the same configuration of the main drp_ie */
memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));
/* FIXME: handle properly the unsafe bit */
uwb_ie_drp_set_unsafe(drp_ie, 1);
uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));
uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
}
rsv->ie_valid = true;
return 0;
}
/*
* Set MAS bits from given MAS bitmap in a single zone of large bitmap.
*
* We are given a zone id and the MAS bitmap of bits that need to be set in
* this zone. Note that this zone may already have bits set and this only
* adds settings - we cannot simply assign the MAS bitmap contents to the
 * zone contents. We iterate over the bits (MAS) in the zone and set the
* bits that are set in the given MAS bitmap.
*/
static
void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
{
int mas;
u16 mas_mask;
for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
mas_mask = 1 << mas;
if (mas_bm & mas_mask)
set_bit(zone * UWB_NUM_ZONES + mas, bm->bm);
}
}
/**
* uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap
* @mas: MAS bitmap that will be populated to correspond to the
* allocation fields in the DRP IE
* @drp_ie: the DRP IE that contains the allocation fields.
*
* The input format is an array of MAS allocation fields (16 bit Zone
* bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
* 16.8.6. The output is a full 256 bit MAS bitmap.
*
* We go over all the allocation fields, for each allocation field we
* know which zones are impacted. We iterate over all the zones
* impacted and call a function that will set the correct MAS bits in
* each zone.
*/
void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
{
int numallocs = (drp_ie->hdr.length - 4) / 4;
const struct uwb_drp_alloc *alloc;
int cnt;
u16 zone_bm, mas_bm;
u8 zone;
u16 zone_mask;
bitmap_zero(bm->bm, UWB_NUM_MAS);
for (cnt = 0; cnt < numallocs; cnt++) {
alloc = &drp_ie->allocs[cnt];
zone_bm = le16_to_cpu(alloc->zone_bm);
mas_bm = le16_to_cpu(alloc->mas_bm);
for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
zone_mask = 1 << zone;
if (zone_bm & zone_mask)
uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
}
}
}
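/*
 * Worked example for the expansion above (hypothetical values): an
 * allocation field with zone_bm == 0x0005 and mas_bm == 0x00ff sets
 * MAS 0-7 of zone 0 and MAS 0-7 of zone 2, i.e. sixteen bits in the
 * 256-bit reservation bitmap.
 */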
| gpl-2.0 |
GaloisInc/linux-deadline | net/bridge/netfilter/ebt_ip.c | 13577 | 3302 | /*
* ebt_ip
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2002
*
* Changes:
* added ip-sport and ip-dport
* Innominate Security Technologies AG <mhopf@innominate.com>
* September, 2002
*/
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/in.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip.h>
struct tcpudphdr {
__be16 src;
__be16 dst;
};
static bool
ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct iphdr *ih;
struct iphdr _iph;
const struct tcpudphdr *pptr;
struct tcpudphdr _ports;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL)
return false;
if (info->bitmask & EBT_IP_TOS &&
FWINV(info->tos != ih->tos, EBT_IP_TOS))
return false;
if (info->bitmask & EBT_IP_SOURCE &&
FWINV((ih->saddr & info->smsk) !=
info->saddr, EBT_IP_SOURCE))
return false;
if ((info->bitmask & EBT_IP_DEST) &&
FWINV((ih->daddr & info->dmsk) !=
info->daddr, EBT_IP_DEST))
return false;
if (info->bitmask & EBT_IP_PROTO) {
if (FWINV(info->protocol != ih->protocol, EBT_IP_PROTO))
return false;
if (!(info->bitmask & EBT_IP_DPORT) &&
!(info->bitmask & EBT_IP_SPORT))
return true;
if (ntohs(ih->frag_off) & IP_OFFSET)
return false;
pptr = skb_header_pointer(skb, ih->ihl*4,
sizeof(_ports), &_ports);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP_DPORT) {
u32 dst = ntohs(pptr->dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1],
EBT_IP_DPORT))
return false;
}
if (info->bitmask & EBT_IP_SPORT) {
u32 src = ntohs(pptr->src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1],
EBT_IP_SPORT))
return false;
}
}
return true;
}
static int ebt_ip_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if (e->ethproto != htons(ETH_P_IP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK)
return -EINVAL;
if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) {
if (info->invflags & EBT_IP_PROTO)
return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
return -EINVAL;
}
if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
return -EINVAL;
if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
return 0;
}
static struct xt_match ebt_ip_mt_reg __read_mostly = {
.name = "ip",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_ip_mt,
.checkentry = ebt_ip_mt_check,
.matchsize = sizeof(struct ebt_ip_info),
.me = THIS_MODULE,
};
static int __init ebt_ip_init(void)
{
return xt_register_match(&ebt_ip_mt_reg);
}
static void __exit ebt_ip_fini(void)
{
xt_unregister_match(&ebt_ip_mt_reg);
}
module_init(ebt_ip_init);
module_exit(ebt_ip_fini);
MODULE_DESCRIPTION("Ebtables: IPv4 protocol packet match");
MODULE_LICENSE("GPL");
| gpl-2.0 |
LeMaker/linux-sunxi | drivers/video/fbtft/fb_ssd1306.c | 10 | 5474 | /*
* FB driver for the SSD1306 OLED Controller
*
* Copyright (C) 2013 Noralf Tronnes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_ssd1306"
#define WIDTH 128
#define HEIGHT 64
/*
write_reg() caveat:
This doesn't work because D/C has to be LOW for both values:
write_reg(par, val1, val2);
Do it like this:
write_reg(par, val1);
write_reg(par, val2);
*/
/* Init sequence taken from the Adafruit SSD1306 Arduino library */
static int init_display(struct fbtft_par *par)
{
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
par->fbtftops.reset(par);
if (par->gamma.curves[0] == 0) {
mutex_lock(&par->gamma.lock);
if (par->info->var.yres == 64)
par->gamma.curves[0] = 0xCF;
else
par->gamma.curves[0] = 0x8F;
mutex_unlock(&par->gamma.lock);
}
/* Set Display OFF */
write_reg(par, 0xAE);
/* Set Display Clock Divide Ratio/ Oscillator Frequency */
write_reg(par, 0xD5);
write_reg(par, 0x80);
/* Set Multiplex Ratio */
write_reg(par, 0xA8);
if (par->info->var.yres == 64)
write_reg(par, 0x3F);
else
write_reg(par, 0x1F);
/* Set Display Offset */
write_reg(par, 0xD3);
write_reg(par, 0x0);
/* Set Display Start Line */
write_reg(par, 0x40 | 0x0);
/* Charge Pump Setting */
write_reg(par, 0x8D);
/* A[2] = 1b, Enable charge pump during display on */
write_reg(par, 0x14);
/* Set Memory Addressing Mode */
write_reg(par, 0x20);
/* Vertical addressing mode */
write_reg(par, 0x01);
/*Set Segment Re-map */
/* column address 127 is mapped to SEG0 */
write_reg(par, 0xA0 | 0x1);
/* Set COM Output Scan Direction */
/* remapped mode. Scan from COM[N-1] to COM0 */
write_reg(par, 0xC8);
/* Set COM Pins Hardware Configuration */
write_reg(par, 0xDA);
if (par->info->var.yres == 64)
/* A[4]=1b, Alternative COM pin configuration */
write_reg(par, 0x12);
else
/* A[4]=0b, Sequential COM pin configuration */
write_reg(par, 0x02);
/* Set Pre-charge Period */
write_reg(par, 0xD9);
write_reg(par, 0xF1);
/* Set VCOMH Deselect Level */
write_reg(par, 0xDB);
/* according to the datasheet, this value is out of bounds */
write_reg(par, 0x40);
/* Entire Display ON */
/* Resume to RAM content display. Output follows RAM content */
write_reg(par, 0xA4);
/* Set Normal Display
0 in RAM: OFF in display panel
1 in RAM: ON in display panel */
write_reg(par, 0xA6);
/* Set Display ON */
write_reg(par, 0xAF);
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
"%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, ye);
/* Set Lower Column Start Address for Page Addressing Mode */
write_reg(par, 0x00 | 0x0);
/* Set Higher Column Start Address for Page Addressing Mode */
write_reg(par, 0x10 | 0x0);
/* Set Display Start Line */
write_reg(par, 0x40 | 0x0);
}
static int blank(struct fbtft_par *par, bool on)
{
fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
__func__, on ? "true" : "false");
if (on)
write_reg(par, 0xAE);
else
write_reg(par, 0xAF);
return 0;
}
/* Gamma is used to control Contrast */
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
/* apply mask */
curves[0] &= 0xFF;
/* Set Contrast Control for BANK0 */
write_reg(par, 0x81);
write_reg(par, curves[0]);
return 0;
}
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16 = (u16 *)par->info->screen_base;
u8 *buf = par->txbuf.buf;
int x, y, i;
int ret = 0;
fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
for (x = 0; x < par->info->var.xres; x++) {
for (y = 0; y < par->info->var.yres/8; y++) {
*buf = 0x00;
for (i = 0; i < 8; i++)
*buf |= (vmem16[(y*8+i)*par->info->var.xres+x] ? 1 : 0) << i;
buf++;
}
}
/* Write data */
gpio_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf,
par->info->var.xres*par->info->var.yres/8);
if (ret < 0)
dev_err(par->info->device,
"%s: write failed and returned: %d\n", __func__, ret);
return ret;
}
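/*
 * Packing note: the 16-bit framebuffer is reduced to one bit per pixel
 * and packed column-major to match the vertical addressing mode set in
 * init_display() -- each transmitted byte covers pixels (x, y*8) through
 * (x, y*8+7), with bit i holding row y*8 + i and any non-zero framebuffer
 * value counting as "on".
 */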
static struct fbtft_display display = {
.regwidth = 8,
.width = WIDTH,
.height = HEIGHT,
.gamma_num = 1,
.gamma_len = 1,
.gamma = "00",
.fbtftops = {
.write_vmem = write_vmem,
.init_display = init_display,
.set_addr_win = set_addr_win,
.blank = blank,
.set_gamma = set_gamma,
},
};
FBTFT_REGISTER_DRIVER(DRVNAME, &display);
MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_DESCRIPTION("SSD1306 OLED Driver");
MODULE_AUTHOR("Noralf Tronnes");
MODULE_LICENSE("GPL");
| gpl-2.0 |
lluixhi/coreboot | src/northbridge/amd/amdfam10/debug.c | 10 | 7553 | /*
* This file is part of the coreboot project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.
*/
/*
* Generic FAM10 debug code, used by mainboard specific romstage.c
*/
#include "pci.c"
#include <delay.h>
static inline void print_debug_addr(const char *str, void *val)
{
#if CONFIG_DEBUG_CAR
printk(BIOS_DEBUG, "------Address debug: %s%p------\n", str, val);
#endif
}
static void print_debug_pci_dev(u32 dev)
{
printk(BIOS_DEBUG, "PCI: %02x:%02x.%02x", (dev>>20) & 0xff, (dev>>15) & 0x1f, (dev>>12) & 0x7);
}
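/*
 * The shifts above assume the romstage PCI_DEV() encoding: bus in bits
 * 20-27, device in bits 15-19 and function in bits 12-14 of the u32
 * handle, which is also why the scan loops below step by PCI_DEV(0,0,1)
 * to advance one function at a time.
 */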
static inline void print_pci_devices(void)
{
device_t dev;
for(dev = PCI_DEV(0, 0, 0);
dev <= PCI_DEV(0xff, 0x1f, 0x7);
dev += PCI_DEV(0,0,1)) {
u32 id;
id = pci_read_config32(dev, PCI_VENDOR_ID);
if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0x0000)) {
continue;
}
print_debug_pci_dev(dev);
printk(BIOS_DEBUG, " %04x:%04x\n", (id & 0xffff), (id>>16));
if(((dev>>12) & 0x07) == 0) {
u8 hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
if((hdr_type & 0x80) != 0x80) {
dev += PCI_DEV(0,0,7);
}
}
}
}
static inline void print_pci_devices_on_bus(u32 busn)
{
device_t dev;
for(dev = PCI_DEV(busn, 0, 0);
dev <= PCI_DEV(busn, 0x1f, 0x7);
dev += PCI_DEV(0,0,1)) {
u32 id;
id = pci_read_config32(dev, PCI_VENDOR_ID);
if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0x0000)) {
continue;
}
print_debug_pci_dev(dev);
printk(BIOS_DEBUG, " %04x:%04x\n", (id & 0xffff), (id>>16));
if(((dev>>12) & 0x07) == 0) {
u8 hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
if((hdr_type & 0x80) != 0x80) {
dev += PCI_DEV(0,0,7);
}
}
}
}
static void dump_pci_device_range(u32 dev, u32 start_reg, u32 size)
{
int i;
print_debug_pci_dev(dev);
int j;
int end = start_reg + size;
for(i = start_reg; i < end; i+=4) {
u32 val;
if ((i & 0x0f) == 0) {
printk(BIOS_DEBUG, "\n%04x:",i);
}
val = pci_read_config32(dev, i);
for(j=0;j<4;j++) {
printk(BIOS_DEBUG, " %02x", val & 0xff);
val >>= 8;
}
}
printk(BIOS_DEBUG, "\n");
}
static void dump_pci_device(u32 dev)
{
dump_pci_device_range(dev, 0, 4096);
}
static void dump_pci_device_index_wait_range(u32 dev, u32 index_reg, u32 start,
u32 size)
{
int i;
int end = start + size;
print_debug_pci_dev(dev);
printk(BIOS_DEBUG, " -- index_reg=%08x", index_reg);
for(i = start; i < end; i++) {
u32 val;
int j;
printk(BIOS_DEBUG, "\n%02x:",i);
val = pci_read_config32_index_wait(dev, index_reg, i);
for(j=0;j<4;j++) {
printk(BIOS_DEBUG, " %02x", val & 0xff);
val >>= 8;
}
}
printk(BIOS_DEBUG, "\n");
}
static inline void dump_pci_device_index_wait(u32 dev, u32 index_reg)
{
dump_pci_device_index_wait_range(dev, index_reg, 0, 0x54);
dump_pci_device_index_wait_range(dev, index_reg, 0x100, 0x08); //DIMM1 when memclk > 400Hz
// dump_pci_device_index_wait_range(dev, index_reg, 0x200, 0x08); //DIMM2
// dump_pci_device_index_wait_range(dev, index_reg, 0x300, 0x08); //DIMM3
}
static inline void dump_pci_device_index(u32 dev, u32 index_reg, u32 type, u32 length)
{
int i;
print_debug_pci_dev(dev);
printk(BIOS_DEBUG, " index reg: %04x type: %02x", index_reg, type);
type<<=28;
for(i = 0; i < length; i++) {
u32 val;
if ((i & 0x0f) == 0) {
printk(BIOS_DEBUG, "\n%02x:",i);
}
val = pci_read_config32_index(dev, index_reg, i|type);
printk(BIOS_DEBUG, " %08x", val);
}
printk(BIOS_DEBUG, "\n");
}
static inline void dump_pci_devices(void)
{
device_t dev;
for(dev = PCI_DEV(0, 0, 0);
dev <= PCI_DEV(0xff, 0x1f, 0x7);
dev += PCI_DEV(0,0,1)) {
u32 id;
id = pci_read_config32(dev, PCI_VENDOR_ID);
if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0x0000)) {
continue;
}
dump_pci_device(dev);
if(((dev>>12) & 0x07) == 0) {
u8 hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
if((hdr_type & 0x80) != 0x80) {
dev += PCI_DEV(0,0,7);
}
}
}
}
static inline void dump_pci_devices_on_bus(u32 busn)
{
device_t dev;
for(dev = PCI_DEV(busn, 0, 0);
dev <= PCI_DEV(busn, 0x1f, 0x7);
dev += PCI_DEV(0,0,1)) {
u32 id;
id = pci_read_config32(dev, PCI_VENDOR_ID);
if (((id & 0xffff) == 0x0000) || ((id & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0xffff) ||
(((id >> 16) & 0xffff) == 0x0000)) {
continue;
}
dump_pci_device(dev);
if(((dev>>12) & 0x07) == 0) {
u8 hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
if((hdr_type & 0x80) != 0x80) {
dev += PCI_DEV(0,0,7);
}
}
}
}
#if CONFIG_DEBUG_SMBUS
static void dump_spd_registers(const struct mem_controller *ctrl)
{
int i;
printk(BIOS_DEBUG, "\n");
for(i = 0; i < DIMM_SOCKETS; i++) {
u32 device;
device = ctrl->spd_addr[i];
if (device) {
int j;
printk(BIOS_DEBUG, "dimm: %02x.0: %02x", i, device);
for(j = 0; j < 128; j++) {
int status;
u8 byte;
if ((j & 0xf) == 0) {
printk(BIOS_DEBUG, "\n%02x: ", j);
}
status = smbus_read_byte(device, j);
if (status < 0) {
break;
}
byte = status & 0xff;
printk(BIOS_DEBUG, "%02x ", byte);
}
printk(BIOS_DEBUG, "\n");
}
device = ctrl->spd_addr[i+DIMM_SOCKETS];
if (device) {
int j;
printk(BIOS_DEBUG, "dimm: %02x.1: %02x", i, device);
for(j = 0; j < 128; j++) {
int status;
u8 byte;
if ((j & 0xf) == 0) {
printk(BIOS_DEBUG, "\n%02x: ", j);
}
status = smbus_read_byte(device, j);
if (status < 0) {
break;
}
byte = status & 0xff;
printk(BIOS_DEBUG, "%02x ", byte);
}
printk(BIOS_DEBUG, "\n");
}
}
}
static void dump_smbus_registers(void)
{
u32 device;
printk(BIOS_DEBUG, "\n");
for(device = 1; device < 0x80; device++) {
int j;
if( smbus_read_byte(device, 0) < 0 ) continue;
printk(BIOS_DEBUG, "smbus: %02x", device);
for(j = 0; j < 256; j++) {
int status;
u8 byte;
status = smbus_read_byte(device, j);
if (status < 0) {
break;
}
if ((j & 0xf) == 0) {
printk(BIOS_DEBUG, "\n%02x: ",j);
}
byte = status & 0xff;
printk(BIOS_DEBUG, "%02x ", byte);
}
printk(BIOS_DEBUG, "\n");
}
}
#endif
static inline void dump_io_resources(u32 port)
{
int i;
udelay(2000);
printk(BIOS_DEBUG, "%04x:\n", port);
for(i=0;i<256;i++) {
u8 val;
if ((i & 0x0f) == 0) {
printk(BIOS_DEBUG, "%02x:", i);
}
val = inb(port);
printk(BIOS_DEBUG, " %02x",val);
if ((i & 0x0f) == 0x0f) {
printk(BIOS_DEBUG, "\n");
}
port++;
}
}
static inline void dump_mem(u32 start, u32 end)
{
u32 i;
printk(BIOS_DEBUG, "dump_mem:");
for(i=start;i<end;i++) {
if((i & 0xf)==0) {
printk(BIOS_DEBUG, "\n%08x:", i);
}
printk(BIOS_DEBUG, " %02x", (u8)*((u8 *)i));
}
printk(BIOS_DEBUG, "\n");
}
| gpl-2.0 |
jmarcet/kodi | xbmc/interfaces/json-rpc/PlayerOperations.cpp | 10 | 47213 | /*
* Copyright (C) 2005-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "PlayerOperations.h"
#include "Application.h"
#include "PlayListPlayer.h"
#include "guilib/GUIWindowManager.h"
#include "input/Key.h"
#include "GUIUserMessages.h"
#include "pictures/GUIWindowSlideShow.h"
#include "interfaces/Builtins.h"
#include "PartyModeManager.h"
#include "ApplicationMessenger.h"
#include "FileItem.h"
#include "VideoLibrary.h"
#include "video/VideoDatabase.h"
#include "AudioLibrary.h"
#include "GUIInfoManager.h"
#include "epg/EpgInfoTag.h"
#include "music/MusicDatabase.h"
#include "pvr/PVRManager.h"
#include "pvr/channels/PVRChannel.h"
#include "pvr/channels/PVRChannelGroupsContainer.h"
#include "pvr/recordings/PVRRecordings.h"
#include "cores/IPlayer.h"
#include "cores/playercorefactory/PlayerCoreConfig.h"
#include "cores/playercorefactory/PlayerCoreFactory.h"
#include "utils/SeekHandler.h"
using namespace JSONRPC;
using namespace PLAYLIST;
using namespace PVR;
JSONRPC_STATUS CPlayerOperations::GetActivePlayers(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
int activePlayers = GetActivePlayers();
result = CVariant(CVariant::VariantTypeArray);
if (activePlayers & Video)
{
CVariant video = CVariant(CVariant::VariantTypeObject);
video["playerid"] = GetPlaylist(Video);
video["type"] = "video";
result.append(video);
}
if (activePlayers & Audio)
{
CVariant audio = CVariant(CVariant::VariantTypeObject);
audio["playerid"] = GetPlaylist(Audio);
audio["type"] = "audio";
result.append(audio);
}
if (activePlayers & Picture)
{
CVariant picture = CVariant(CVariant::VariantTypeObject);
picture["playerid"] = GetPlaylist(Picture);
picture["type"] = "picture";
result.append(picture);
}
return OK;
}
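// Illustrative response shape only (values depend on what is playing): a
// Player.GetActivePlayers call while a video is running returns something
// like [ { "playerid": 1, "type": "video" } ], where "playerid" is the
// playlist id resolved by GetPlaylist() above.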
JSONRPC_STATUS CPlayerOperations::GetPlayers(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
std::string media = parameterObject["media"].asString();
result = CVariant(CVariant::VariantTypeArray);
VECPLAYERCORES players;
if (media == "all")
CPlayerCoreFactory::Get().GetPlayers(players);
else
{
bool video = false;
if (media == "video")
video = true;
CPlayerCoreFactory::Get().GetPlayers(players, true, video);
}
for (VECPLAYERCORES::const_iterator itPlayer = players.begin(); itPlayer != players.end(); ++itPlayer)
{
PLAYERCOREID playerId = *itPlayer;
const CPlayerCoreConfig* playerConfig = CPlayerCoreFactory::Get().GetPlayerConfig(playerId);
if (playerConfig == NULL)
continue;
CVariant player(CVariant::VariantTypeObject);
player["playercoreid"] = static_cast<int>(playerId);
player["name"] = playerConfig->GetName();
switch (playerConfig->GetType())
{
case EPC_EXTPLAYER:
player["type"] = "external";
break;
case EPC_UPNPPLAYER:
player["type"] = "remote";
break;
default:
player["type"] = "internal";
break;
}
player["playsvideo"] = playerConfig->PlaysVideo();
player["playsaudio"] = playerConfig->PlaysAudio();
result.push_back(player);
}
return OK;
}
JSONRPC_STATUS CPlayerOperations::GetProperties(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
PlayerType player = GetPlayer(parameterObject["playerid"]);
CVariant properties = CVariant(CVariant::VariantTypeObject);
for (unsigned int index = 0; index < parameterObject["properties"].size(); index++)
{
std::string propertyName = parameterObject["properties"][index].asString();
CVariant property;
JSONRPC_STATUS ret;
if ((ret = GetPropertyValue(player, propertyName, property)) != OK)
return ret;
properties[propertyName] = property;
}
result = properties;
return OK;
}
JSONRPC_STATUS CPlayerOperations::GetItem(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
PlayerType player = GetPlayer(parameterObject["playerid"]);
CFileItemPtr fileItem;
switch (player)
{
case Video:
case Audio:
{
fileItem = CFileItemPtr(new CFileItem(g_application.CurrentFileItem()));
if (IsPVRChannel())
{
CPVRChannelPtr currentChannel(g_PVRManager.GetCurrentChannel());
if (currentChannel)
fileItem = CFileItemPtr(new CFileItem(currentChannel));
}
else if (player == Video)
{
if (!CVideoLibrary::FillFileItem(g_application.CurrentFile(), fileItem, parameterObject))
{
const CVideoInfoTag *currentVideoTag = g_infoManager.GetCurrentMovieTag();
if (currentVideoTag != NULL)
{
std::string originalLabel = fileItem->GetLabel();
fileItem->SetFromVideoInfoTag(*currentVideoTag);
if (fileItem->GetLabel().empty())
fileItem->SetLabel(originalLabel);
}
fileItem->SetPath(g_application.CurrentFileItem().GetPath());
}
}
else
{
if (!CAudioLibrary::FillFileItem(g_application.CurrentFile(), fileItem, parameterObject))
{
const MUSIC_INFO::CMusicInfoTag *currentMusicTag = g_infoManager.GetCurrentSongTag();
if (currentMusicTag != NULL)
{
std::string originalLabel = fileItem->GetLabel();
fileItem = CFileItemPtr(new CFileItem(*currentMusicTag));
if (fileItem->GetLabel().empty())
fileItem->SetLabel(originalLabel);
}
fileItem->SetPath(g_application.CurrentFileItem().GetPath());
}
}
if (IsPVRChannel())
break;
if (player == Video)
{
bool additionalInfo = false;
for (CVariant::const_iterator_array itr = parameterObject["properties"].begin_array(); itr != parameterObject["properties"].end_array(); itr++)
{
std::string fieldValue = itr->asString();
if (fieldValue == "cast" || fieldValue == "set" || fieldValue == "setid" || fieldValue == "showlink" || fieldValue == "resume" ||
(fieldValue == "streamdetails" && !fileItem->GetVideoInfoTag()->m_streamDetails.HasItems()))
additionalInfo = true;
}
CVideoDatabase videodatabase;
if ((additionalInfo) &&
videodatabase.Open())
{
if (additionalInfo)
{
switch (fileItem->GetVideoContentType())
{
case VIDEODB_CONTENT_MOVIES:
videodatabase.GetMovieInfo("", *(fileItem->GetVideoInfoTag()), fileItem->GetVideoInfoTag()->m_iDbId);
break;
case VIDEODB_CONTENT_MUSICVIDEOS:
videodatabase.GetMusicVideoInfo("", *(fileItem->GetVideoInfoTag()), fileItem->GetVideoInfoTag()->m_iDbId);
break;
case VIDEODB_CONTENT_EPISODES:
videodatabase.GetEpisodeInfo("", *(fileItem->GetVideoInfoTag()), fileItem->GetVideoInfoTag()->m_iDbId);
break;
case VIDEODB_CONTENT_TVSHOWS:
case VIDEODB_CONTENT_MOVIE_SETS:
default:
break;
}
}
videodatabase.Close();
}
}
else if (player == Audio)
{
if (fileItem->IsMusicDb())
{
CMusicDatabase musicdb;
CFileItemList items;
items.Add(fileItem);
CAudioLibrary::GetAdditionalSongDetails(parameterObject, items, musicdb);
}
}
break;
}
case Picture:
{
CGUIWindowSlideShow *slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (!slideshow)
return FailedToExecute;
CFileItemList slides;
slideshow->GetSlideShowContents(slides);
fileItem = slides[slideshow->CurrentSlide() - 1];
break;
}
case None:
default:
return FailedToExecute;
}
HandleFileItem("id", !IsPVRChannel(), "item", fileItem, parameterObject, parameterObject["properties"], result, false);
return OK;
}
JSONRPC_STATUS CPlayerOperations::PlayPause(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
CGUIWindowSlideShow *slideshow = NULL;
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
case Audio:
if (!g_application.m_pPlayer->CanPause())
return FailedToExecute;
if (parameterObject["play"].isString())
CBuiltins::Execute("playercontrol(play)");
else
{
if (parameterObject["play"].asBoolean())
{
if (g_application.m_pPlayer->IsPausedPlayback())
CApplicationMessenger::Get().MediaPause();
else if (g_application.m_pPlayer->GetPlaySpeed() != 1)
g_application.m_pPlayer->SetPlaySpeed(1, g_application.IsMutedInternal());
}
else if (!g_application.m_pPlayer->IsPausedPlayback())
CApplicationMessenger::Get().MediaPause();
}
result["speed"] = g_application.m_pPlayer->IsPausedPlayback() ? 0 : g_application.m_pPlayer->GetPlaySpeed();
return OK;
case Picture:
slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (slideshow && slideshow->IsPlaying() &&
(parameterObject["play"].isString() ||
(parameterObject["play"].isBoolean() && parameterObject["play"].asBoolean() == slideshow->IsPaused())))
SendSlideshowAction(ACTION_PAUSE);
if (slideshow && slideshow->IsPlaying() && !slideshow->IsPaused())
result["speed"] = slideshow->GetDirection();
else
result["speed"] = 0;
return OK;
case None:
default:
return FailedToExecute;
}
}
JSONRPC_STATUS CPlayerOperations::Stop(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
case Audio:
CApplicationMessenger::Get().MediaStop(true, (int)parameterObject["playerid"].asInteger());
return ACK;
case Picture:
SendSlideshowAction(ACTION_STOP);
return ACK;
case None:
default:
return FailedToExecute;
}
}
JSONRPC_STATUS CPlayerOperations::SetSpeed(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
case Audio:
if (parameterObject["speed"].isInteger())
{
int speed = (int)parameterObject["speed"].asInteger();
if (speed != 0)
{
// If the player is paused we first need to unpause
if (g_application.m_pPlayer->IsPausedPlayback())
g_application.m_pPlayer->Pause();
g_application.m_pPlayer->SetPlaySpeed(speed, g_application.IsMutedInternal());
}
else
g_application.m_pPlayer->Pause();
}
else if (parameterObject["speed"].isString())
{
if (parameterObject["speed"].asString().compare("increment") == 0)
CBuiltins::Execute("playercontrol(forward)");
else
CBuiltins::Execute("playercontrol(rewind)");
}
else
return InvalidParams;
result["speed"] = g_application.m_pPlayer->IsPausedPlayback() ? 0 : g_application.m_pPlayer->GetPlaySpeed();
return OK;
case Picture:
case None:
default:
return FailedToExecute;
}
}
JSONRPC_STATUS CPlayerOperations::Seek(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
PlayerType player = GetPlayer(parameterObject["playerid"]);
switch (player)
{
case Video:
case Audio:
{
if (!g_application.m_pPlayer->CanSeek())
return FailedToExecute;
const CVariant& value = parameterObject["value"];
if (IsType(value, NumberValue) ||
(value.isObject() && value.isMember("percentage")))
g_application.SeekPercentage(IsType(value, NumberValue) ? value.asFloat() : value["percentage"].asFloat());
else if (value.isString() ||
(value.isObject() && value.isMember("step")))
{
std::string step = value.isString() ? value.asString() : value["step"].asString();
if (step == "smallforward")
CBuiltins::Execute("playercontrol(smallskipforward)");
else if (step == "smallbackward")
CBuiltins::Execute("playercontrol(smallskipbackward)");
else if (step == "bigforward")
CBuiltins::Execute("playercontrol(bigskipforward)");
else if (step == "bigbackward")
CBuiltins::Execute("playercontrol(bigskipbackward)");
else
return InvalidParams;
}
else if (value.isObject() && value.isMember("seconds") && value.size() == 1)
CSeekHandler::Get().SeekSeconds(static_cast<int>(value["seconds"].asInteger()));
else if (value.isObject())
g_application.SeekTime(ParseTimeInSeconds(value.isMember("time") ? value["time"] : value));
else
return InvalidParams;
GetPropertyValue(player, "percentage", result["percentage"]);
GetPropertyValue(player, "time", result["time"]);
GetPropertyValue(player, "totaltime", result["totaltime"]);
return OK;
}
case Picture:
case None:
default:
return FailedToExecute;
}
}
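// Illustrative request only (parameter values invented): the branches above
// accept a percentage, a named step, a bare seconds offset or an absolute
// time object, so a relative 30 second jump could be requested as
//   {"jsonrpc": "2.0", "id": 1, "method": "Player.Seek",
//    "params": {"playerid": 1, "value": {"seconds": 30}}}
// and the reply echoes the resulting "percentage", "time" and "totaltime".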
JSONRPC_STATUS CPlayerOperations::Move(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant ¶meterObject, CVariant &result)
{
std::string direction = parameterObject["direction"].asString();
switch (GetPlayer(parameterObject["playerid"]))
{
case Picture:
if (direction == "left")
SendSlideshowAction(ACTION_MOVE_LEFT);
else if (direction == "right")
SendSlideshowAction(ACTION_MOVE_RIGHT);
else if (direction == "up")
SendSlideshowAction(ACTION_MOVE_UP);
else if (direction == "down")
SendSlideshowAction(ACTION_MOVE_DOWN);
else
return InvalidParams;
return ACK;
case Video:
case Audio:
if (direction == "left" || direction == "up")
CApplicationMessenger::Get().SendAction(CAction(ACTION_PREV_ITEM));
else if (direction == "right" || direction == "down")
CApplicationMessenger::Get().SendAction(CAction(ACTION_NEXT_ITEM));
else
return InvalidParams;
return ACK;
case None:
default:
return FailedToExecute;
}
}
JSONRPC_STATUS CPlayerOperations::Zoom(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
CVariant zoom = parameterObject["zoom"];
switch (GetPlayer(parameterObject["playerid"]))
{
case Picture:
if (zoom.isInteger())
SendSlideshowAction(ACTION_ZOOM_LEVEL_NORMAL + ((int)zoom.asInteger() - 1));
else if (zoom.isString())
{
std::string strZoom = zoom.asString();
if (strZoom == "in")
SendSlideshowAction(ACTION_ZOOM_IN);
else if (strZoom == "out")
SendSlideshowAction(ACTION_ZOOM_OUT);
else
return InvalidParams;
}
else
return InvalidParams;
return ACK;
case Video:
case Audio:
case None:
default:
return FailedToExecute;
}
}
JSONRPC_STATUS CPlayerOperations::Rotate(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
switch (GetPlayer(parameterObject["playerid"]))
{
case Picture:
if (parameterObject["value"].asString().compare("clockwise") == 0)
SendSlideshowAction(ACTION_ROTATE_PICTURE_CW);
else
SendSlideshowAction(ACTION_ROTATE_PICTURE_CCW);
return ACK;
case Video:
case Audio:
case None:
default:
return FailedToExecute;
}
}
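// Illustrative Player.Open request handled below (example values, not taken from this file):
//   {"jsonrpc": "2.0", "method": "Player.Open", "params": {"item": {"playlistid": 1, "position": 0}}, "id": 1}
// "item" may instead carry a path, partymode, channelid or recordingid, and "options"
// may request shuffled/repeat/resume behaviour or a specific player core.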
JSONRPC_STATUS CPlayerOperations::Open(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
CVariant options = parameterObject["options"];
CVariant optionShuffled = options["shuffled"];
CVariant optionRepeat = options["repeat"];
CVariant optionResume = options["resume"];
CVariant optionPlayer = options["playercoreid"];
if (parameterObject["item"].isObject() && parameterObject["item"].isMember("playlistid"))
{
int playlistid = (int)parameterObject["item"]["playlistid"].asInteger();
if (playlistid < PLAYLIST_PICTURE)
{
// Apply the "shuffled" option if available
if (optionShuffled.isBoolean())
g_playlistPlayer.SetShuffle(playlistid, optionShuffled.asBoolean(), false);
// Apply the "repeat" option if available
if (!optionRepeat.isNull())
g_playlistPlayer.SetRepeat(playlistid, (REPEAT_STATE)ParseRepeatState(optionRepeat), false);
}
int playlistStartPosition = (int)parameterObject["item"]["position"].asInteger();
switch (playlistid)
{
case PLAYLIST_MUSIC:
case PLAYLIST_VIDEO:
CApplicationMessenger::Get().MediaPlay(playlistid, playlistStartPosition);
OnPlaylistChanged();
break;
case PLAYLIST_PICTURE:
{
std::string firstPicturePath;
if (playlistStartPosition > 0)
{
CGUIWindowSlideShow *slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (slideshow != NULL)
{
CFileItemList list;
slideshow->GetSlideShowContents(list);
if (playlistStartPosition < list.Size())
firstPicturePath = list.Get(playlistStartPosition)->GetPath();
}
}
return StartSlideshow("", false, optionShuffled.isBoolean() && optionShuffled.asBoolean(), firstPicturePath);
break;
}
}
return ACK;
}
else if (parameterObject["item"].isObject() && parameterObject["item"].isMember("path"))
{
bool random = (optionShuffled.isBoolean() && optionShuffled.asBoolean()) ||
(!optionShuffled.isBoolean() && parameterObject["item"]["random"].asBoolean());
return StartSlideshow(parameterObject["item"]["path"].asString(), parameterObject["item"]["recursive"].asBoolean(), random);
}
else if (parameterObject["item"].isObject() && parameterObject["item"].isMember("partymode"))
{
if (g_partyModeManager.IsEnabled())
g_partyModeManager.Disable();
CApplicationMessenger::Get().ExecBuiltIn("playercontrol(partymode(" + parameterObject["item"]["partymode"].asString() + "))");
return ACK;
}
else if (parameterObject["item"].isObject() && parameterObject["item"].isMember("channelid"))
{
if (!g_PVRManager.IsStarted())
return FailedToExecute;
CPVRChannelGroupsContainer *channelGroupContainer = g_PVRChannelGroups;
if (channelGroupContainer == NULL)
return FailedToExecute;
CPVRChannelPtr channel = channelGroupContainer->GetChannelById((int)parameterObject["item"]["channelid"].asInteger());
if (channel == NULL)
return InvalidParams;
if ((g_PVRManager.IsPlayingRadio() && channel->IsRadio()) ||
(g_PVRManager.IsPlayingTV() && !channel->IsRadio()))
g_application.m_pPlayer->SwitchChannel(channel);
else
CApplicationMessenger::Get().MediaPlay(CFileItem(channel));
return ACK;
}
else if (parameterObject["item"].isObject() && parameterObject["item"].isMember("recordingid"))
{
if (!g_PVRManager.IsStarted())
return FailedToExecute;
CPVRRecordings *recordingsContainer = g_PVRRecordings;
if (recordingsContainer == NULL)
return FailedToExecute;
CFileItemPtr fileItem = recordingsContainer->GetById((int)parameterObject["item"]["recordingid"].asInteger());
if (fileItem == NULL)
return InvalidParams;
CApplicationMessenger::Get().MediaPlay(*fileItem);
return ACK;
}
else
{
CFileItemList list;
if (FillFileItemList(parameterObject["item"], list) && list.Size() > 0)
{
bool slideshow = true;
for (int index = 0; index < list.Size(); index++)
{
if (!list[index]->IsPicture())
{
slideshow = false;
break;
}
}
if (slideshow)
{
CGUIWindowSlideShow *slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (!slideshow)
return FailedToExecute;
SendSlideshowAction(ACTION_STOP);
slideshow->Reset();
for (int index = 0; index < list.Size(); index++)
slideshow->Add(list[index].get());
return StartSlideshow("", false, optionShuffled.isBoolean() && optionShuffled.asBoolean());
}
else
{
// Handle the "playercoreid" option
if (!optionPlayer.isNull())
{
PLAYERCOREID playerId = EPC_NONE;
if (optionPlayer.isInteger())
{
playerId = (PLAYERCOREID)optionPlayer.asInteger();
// check if there's actually a player with the given player ID
if (CPlayerCoreFactory::Get().GetPlayerConfig(playerId) == NULL)
return InvalidParams;
// check if the player can handle at least the first item in the list
VECPLAYERCORES possiblePlayers;
CPlayerCoreFactory::Get().GetPlayers(*list.Get(0).get(), possiblePlayers);
VECPLAYERCORES::const_iterator matchingPlayer = std::find(possiblePlayers.begin(), possiblePlayers.end(), playerId);
if (matchingPlayer == possiblePlayers.end())
return InvalidParams;
}
else if (!optionPlayer.isString() || optionPlayer.asString().compare("default") != 0)
return InvalidParams;
// set the next player to be used
g_application.m_eForcedNextPlayer = playerId;
}
// Handle "shuffled" option
if (optionShuffled.isBoolean())
list.SetProperty("shuffled", optionShuffled);
// Handle "repeat" option
if (!optionRepeat.isNull())
list.SetProperty("repeat", ParseRepeatState(optionRepeat));
// Handle "resume" option
if (list.Size() == 1)
{
if (optionResume.isBoolean() && optionResume.asBoolean())
list[0]->m_lStartOffset = STARTOFFSET_RESUME;
else if (optionResume.isDouble())
list[0]->SetProperty("StartPercent", optionResume);
else if (optionResume.isObject())
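// start offsets are stored in 1/75th-of-a-second units, hence the factor of 75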
list[0]->m_lStartOffset = (int)(ParseTimeInSeconds(optionResume) * 75.0);
}
CApplicationMessenger::Get().MediaPlay(list);
}
return ACK;
}
else
return InvalidParams;
}
return InvalidParams;
}
JSONRPC_STATUS CPlayerOperations::GoTo(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
CVariant to = parameterObject["to"];
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
case Audio:
if (to.isString())
{
std::string strTo = to.asString();
int actionID;
if (strTo == "previous")
actionID = ACTION_PREV_ITEM;
else if (strTo == "next")
actionID = ACTION_NEXT_ITEM;
else
return InvalidParams;
CApplicationMessenger::Get().SendAction(CAction(actionID));
}
else if (to.isInteger())
{
if (IsPVRChannel())
CApplicationMessenger::Get().SendAction(CAction(ACTION_CHANNEL_SWITCH, (float)to.asInteger()));
else
CApplicationMessenger::Get().PlayListPlayerPlay((int)to.asInteger());
}
else
return InvalidParams;
break;
case Picture:
if (to.isString())
{
std::string strTo = to.asString();
int actionID;
if (strTo == "previous")
actionID = ACTION_PREV_PICTURE;
else if (strTo == "next")
actionID = ACTION_NEXT_PICTURE;
else
return InvalidParams;
SendSlideshowAction(actionID);
}
else
return FailedToExecute;
break;
case None:
default:
return FailedToExecute;
}
OnPlaylistChanged();
return ACK;
}
JSONRPC_STATUS CPlayerOperations::SetShuffle(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
CGUIWindowSlideShow *slideshow = NULL;
CVariant shuffle = parameterObject["shuffle"];
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
case Audio:
{
if (IsPVRChannel())
return FailedToExecute;
int playlistid = GetPlaylist(GetPlayer(parameterObject["playerid"]));
if (g_playlistPlayer.IsShuffled(playlistid))
{
if ((shuffle.isBoolean() && !shuffle.asBoolean()) ||
(shuffle.isString() && shuffle.asString() == "toggle"))
{
CApplicationMessenger::Get().PlayListPlayerShuffle(playlistid, false);
OnPlaylistChanged();
}
}
else
{
if ((shuffle.isBoolean() && shuffle.asBoolean()) ||
(shuffle.isString() && shuffle.asString() == "toggle"))
{
CApplicationMessenger::Get().PlayListPlayerShuffle(playlistid, true);
OnPlaylistChanged();
}
}
break;
}
case Picture:
slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (slideshow == NULL)
return FailedToExecute;
if (slideshow->IsShuffled())
{
if ((shuffle.isBoolean() && !shuffle.asBoolean()) ||
(shuffle.isString() && shuffle.asString() == "toggle"))
return FailedToExecute;
}
else
{
if ((shuffle.isBoolean() && shuffle.asBoolean()) ||
(shuffle.isString() && shuffle.asString() == "toggle"))
slideshow->Shuffle();
}
break;
default:
return FailedToExecute;
}
return ACK;
}
JSONRPC_STATUS CPlayerOperations::SetRepeat(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
case Audio:
{
if (IsPVRChannel())
return FailedToExecute;
REPEAT_STATE repeat = REPEAT_NONE;
int playlistid = GetPlaylist(GetPlayer(parameterObject["playerid"]));
if (parameterObject["repeat"].asString() == "cycle")
{
REPEAT_STATE repeatPrev = g_playlistPlayer.GetRepeat(playlistid);
if (repeatPrev == REPEAT_NONE)
repeat = REPEAT_ALL;
else if (repeatPrev == REPEAT_ALL)
repeat = REPEAT_ONE;
else
repeat = REPEAT_NONE;
}
else
repeat = (REPEAT_STATE)ParseRepeatState(parameterObject["repeat"]);
CApplicationMessenger::Get().PlayListPlayerRepeat(playlistid, repeat);
OnPlaylistChanged();
break;
}
case Picture:
default:
return FailedToExecute;
}
return ACK;
}
JSONRPC_STATUS CPlayerOperations::SetPartymode(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
PlayerType player = GetPlayer(parameterObject["playerid"]);
switch (player)
{
case Video:
case Audio:
{
if (IsPVRChannel())
return FailedToExecute;
bool change = false;
PartyModeContext context = PARTYMODECONTEXT_UNKNOWN;
std::string strContext;
if (player == Video)
{
context = PARTYMODECONTEXT_VIDEO;
strContext = "video";
}
else if (player == Audio)
{
context = PARTYMODECONTEXT_MUSIC;
strContext = "music";
}
bool toggle = parameterObject["partymode"].isString();
if (g_partyModeManager.IsEnabled())
{
if (g_partyModeManager.GetType() != context)
return InvalidParams;
if (toggle || parameterObject["partymode"].asBoolean() == false)
change = true;
}
else
{
if (toggle || parameterObject["partymode"].asBoolean() == true)
change = true;
}
if (change)
CApplicationMessenger::Get().ExecBuiltIn("playercontrol(partymode(" + strContext + "))");
break;
}
case Picture:
default:
return FailedToExecute;
}
return ACK;
}
JSONRPC_STATUS CPlayerOperations::SetAudioStream(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
if (g_application.m_pPlayer->HasPlayer())
{
int index = -1;
if (parameterObject["stream"].isString())
{
std::string action = parameterObject["stream"].asString();
if (action.compare("previous") == 0)
{
index = g_application.m_pPlayer->GetAudioStream() - 1;
if (index < 0)
index = g_application.m_pPlayer->GetAudioStreamCount() - 1;
}
else if (action.compare("next") == 0)
{
index = g_application.m_pPlayer->GetAudioStream() + 1;
if (index >= g_application.m_pPlayer->GetAudioStreamCount())
index = 0;
}
else
return InvalidParams;
}
else if (parameterObject["stream"].isInteger())
index = (int)parameterObject["stream"].asInteger();
if (index < 0 || g_application.m_pPlayer->GetAudioStreamCount() <= index)
return InvalidParams;
g_application.m_pPlayer->SetAudioStream(index);
}
else
return FailedToExecute;
break;
case Audio:
case Picture:
default:
return FailedToExecute;
}
return ACK;
}
JSONRPC_STATUS CPlayerOperations::SetSubtitle(const std::string &method, ITransportLayer *transport, IClient *client, const CVariant &parameterObject, CVariant &result)
{
switch (GetPlayer(parameterObject["playerid"]))
{
case Video:
if (g_application.m_pPlayer->HasPlayer())
{
int index = -1;
if (parameterObject["subtitle"].isString())
{
std::string action = parameterObject["subtitle"].asString();
if (action.compare("previous") == 0)
{
index = g_application.m_pPlayer->GetSubtitle() - 1;
if (index < 0)
index = g_application.m_pPlayer->GetSubtitleCount() - 1;
}
else if (action.compare("next") == 0)
{
index = g_application.m_pPlayer->GetSubtitle() + 1;
if (index >= g_application.m_pPlayer->GetSubtitleCount())
index = 0;
}
else if (action.compare("off") == 0)
{
g_application.m_pPlayer->SetSubtitleVisible(false);
return ACK;
}
else if (action.compare("on") == 0)
{
g_application.m_pPlayer->SetSubtitleVisible(true);
return ACK;
}
else
return InvalidParams;
}
else if (parameterObject["subtitle"].isInteger())
index = (int)parameterObject["subtitle"].asInteger();
if (index < 0 || g_application.m_pPlayer->GetSubtitleCount() <= index)
return InvalidParams;
g_application.m_pPlayer->SetSubtitle(index);
// Check if we need to enable subtitles to be displayed
if (parameterObject["enable"].asBoolean() && !g_application.m_pPlayer->GetSubtitleVisible())
g_application.m_pPlayer->SetSubtitleVisible(true);
}
else
return FailedToExecute;
break;
case Audio:
case Picture:
default:
return FailedToExecute;
}
return ACK;
}
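// Returns a bitmask of the player types that are currently active (Video, Audio, Picture).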
int CPlayerOperations::GetActivePlayers()
{
int activePlayers = 0;
if (g_application.m_pPlayer->IsPlayingVideo() || g_PVRManager.IsPlayingTV() || g_PVRManager.IsPlayingRecording())
activePlayers |= Video;
if (g_application.m_pPlayer->IsPlayingAudio() || g_PVRManager.IsPlayingRadio())
activePlayers |= Audio;
if (g_windowManager.IsWindowActive(WINDOW_SLIDESHOW))
activePlayers |= Picture;
return activePlayers;
}
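// Maps a "playerid" parameter (which matches a playlist id) to the corresponding PlayerType, or None if unknown.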
PlayerType CPlayerOperations::GetPlayer(const CVariant &player)
{
int iPlayer = (int)player.asInteger();
PlayerType playerID;
switch (iPlayer)
{
case PLAYLIST_VIDEO:
playerID = Video;
break;
case PLAYLIST_MUSIC:
playerID = Audio;
break;
case PLAYLIST_PICTURE:
playerID = Picture;
break;
default:
playerID = None;
break;
}
if (GetPlaylist(playerID) == iPlayer)
return playerID;
else
return None;
}
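// Determines which playlist id belongs to the given player, preferring the currently active playlist.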
int CPlayerOperations::GetPlaylist(PlayerType player)
{
int playlist = g_playlistPlayer.GetCurrentPlaylist();
if (playlist == PLAYLIST_NONE) // No active playlist, try guessing
playlist = g_application.m_pPlayer->GetPreferredPlaylist();
switch (player)
{
case Video:
return playlist == PLAYLIST_NONE ? PLAYLIST_VIDEO : playlist;
case Audio:
return playlist == PLAYLIST_NONE ? PLAYLIST_MUSIC : playlist;
case Picture:
return PLAYLIST_PICTURE;
default:
return playlist;
}
}
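// Starts a slideshow via GUI_MSG_START_SLIDESHOW; flag bits: 1 = recursive, 2 = random, 4 = not random.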
JSONRPC_STATUS CPlayerOperations::StartSlideshow(const std::string& path, bool recursive, bool random, const std::string &firstPicturePath /* = "" */)
{
int flags = 0;
if (recursive)
flags |= 1;
if (random)
flags |= 2;
else
flags |= 4;
std::vector<std::string> params;
params.push_back(path);
if (!firstPicturePath.empty())
params.push_back(firstPicturePath);
CGUIMessage msg(GUI_MSG_START_SLIDESHOW, 0, 0, flags);
msg.SetStringParams(params);
CApplicationMessenger::Get().SendGUIMessage(msg, WINDOW_SLIDESHOW, true);
return ACK;
}
void CPlayerOperations::SendSlideshowAction(int actionID)
{
CApplicationMessenger::Get().SendAction(CAction(actionID), WINDOW_SLIDESHOW);
}
void CPlayerOperations::OnPlaylistChanged()
{
CGUIMessage msg(GUI_MSG_PLAYLIST_CHANGED, 0, 0);
g_windowManager.SendThreadMessage(msg);
}
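// Resolves a single player property (e.g. "speed", "time", "percentage", "repeat") for the given player type.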
JSONRPC_STATUS CPlayerOperations::GetPropertyValue(PlayerType player, const std::string &property, CVariant &result)
{
if (player == None)
return FailedToExecute;
int playlist = GetPlaylist(player);
if (property == "type")
{
switch (player)
{
case Video:
result = "video";
break;
case Audio:
result = "audio";
break;
case Picture:
result = "picture";
break;
default:
return FailedToExecute;
}
}
else if (property == "partymode")
{
switch (player)
{
case Video:
case Audio:
if (IsPVRChannel())
{
result = false;
break;
}
result = g_partyModeManager.IsEnabled();
break;
case Picture:
result = false;
break;
default:
return FailedToExecute;
}
}
else if (property == "speed")
{
CGUIWindowSlideShow *slideshow = NULL;
switch (player)
{
case Video:
case Audio:
result = g_application.m_pPlayer->IsPausedPlayback() ? 0 : g_application.m_pPlayer->GetPlaySpeed();
break;
case Picture:
slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (slideshow && slideshow->IsPlaying() && !slideshow->IsPaused())
result = slideshow->GetDirection();
else
result = 0;
break;
default:
return FailedToExecute;
}
}
else if (property == "time")
{
switch (player)
{
case Video:
case Audio:
{
int ms = 0;
if (!IsPVRChannel())
ms = (int)(g_application.GetTime() * 1000.0);
else
{
EPG::CEpgInfoTagPtr epg(GetCurrentEpg());
if (epg)
ms = epg->Progress() * 1000;
}
MillisecondsToTimeObject(ms, result);
break;
}
case Picture:
MillisecondsToTimeObject(0, result);
break;
default:
return FailedToExecute;
}
}
else if (property == "percentage")
{
CGUIWindowSlideShow *slideshow = NULL;
switch (player)
{
case Video:
case Audio:
{
if (!IsPVRChannel())
result = g_application.GetPercentage();
else
{
EPG::CEpgInfoTagPtr epg(GetCurrentEpg());
if (epg)
result = epg->ProgressPercentage();
else
result = 0;
}
break;
}
case Picture:
slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (slideshow && slideshow->NumSlides() > 0)
result = (double)slideshow->CurrentSlide() / slideshow->NumSlides();
else
result = 0.0;
break;
default:
return FailedToExecute;
}
}
else if (property == "totaltime")
{
switch (player)
{
case Video:
case Audio:
{
int ms = 0;
if (!IsPVRChannel())
ms = (int)(g_application.GetTotalTime() * 1000.0);
else
{
EPG::CEpgInfoTagPtr epg(GetCurrentEpg());
if (epg)
ms = epg->GetDuration() * 1000;
}
MillisecondsToTimeObject(ms, result);
break;
}
case Picture:
MillisecondsToTimeObject(0, result);
break;
default:
return FailedToExecute;
}
}
else if (property == "playlistid")
{
result = playlist;
}
else if (property == "position")
{
CGUIWindowSlideShow *slideshow = NULL;
switch (player)
{
case Video:
case Audio: /* Return the position of the current item if there is an active playlist */
if (!IsPVRChannel() && g_playlistPlayer.GetCurrentPlaylist() == playlist)
result = g_playlistPlayer.GetCurrentSong();
else
result = -1;
break;
case Picture:
slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (slideshow && slideshow->IsPlaying())
result = slideshow->CurrentSlide() - 1;
else
result = -1;
break;
default:
result = -1;
break;
}
}
else if (property == "repeat")
{
switch (player)
{
case Video:
case Audio:
if (IsPVRChannel())
{
result = "off";
break;
}
switch (g_playlistPlayer.GetRepeat(playlist))
{
case REPEAT_ONE:
result = "one";
break;
case REPEAT_ALL:
result = "all";
break;
default:
result = "off";
break;
}
break;
case Picture:
default:
result = "off";
break;
}
}
else if (property == "shuffled")
{
CGUIWindowSlideShow *slideshow = NULL;
switch (player)
{
case Video:
case Audio:
if (IsPVRChannel())
{
result = false;
break;
}
result = g_playlistPlayer.IsShuffled(playlist);
break;
case Picture:
slideshow = (CGUIWindowSlideShow*)g_windowManager.GetWindow(WINDOW_SLIDESHOW);
if (slideshow && slideshow->IsPlaying())
result = slideshow->IsShuffled();
else
result = -1;
break;
default:
result = -1;
break;
}
}
else if (property == "canseek")
{
switch (player)
{
case Video:
case Audio:
result = g_application.m_pPlayer->CanSeek();
break;
case Picture:
default:
result = false;
break;
}
}
else if (property == "canchangespeed")
{
switch (player)
{
case Video:
case Audio:
result = !IsPVRChannel();
break;
case Picture:
default:
result = false;
break;
}
}
else if (property == "canmove")
{
switch (player)
{
case Picture:
result = true;
break;
case Video:
case Audio:
default:
result = false;
break;
}
}
else if (property == "canzoom")
{
switch (player)
{
case Picture:
result = true;
break;
case Video:
case Audio:
default:
result = false;
break;
}
}
else if (property == "canrotate")
{
switch (player)
{
case Picture:
result = true;
break;
case Video:
case Audio:
default:
result = false;
break;
}
}
else if (property == "canshuffle")
{
switch (player)
{
case Video:
case Audio:
case Picture:
result = !IsPVRChannel();
break;
default:
result = false;
break;
}
}
else if (property == "canrepeat")
{
switch (player)
{
case Video:
case Audio:
result = !IsPVRChannel();
break;
case Picture:
default:
result = false;
break;
}
}
else if (property == "currentaudiostream")
{
switch (player)
{
case Video:
case Audio:
if (g_application.m_pPlayer->HasPlayer())
{
result = CVariant(CVariant::VariantTypeObject);
int index = g_application.m_pPlayer->GetAudioStream();
if (index >= 0)
{
SPlayerAudioStreamInfo info;
g_application.m_pPlayer->GetAudioStreamInfo(index, info);
result["index"] = index;
result["name"] = info.name;
result["language"] = info.language;
result["codec"] = info.audioCodecName;
result["bitrate"] = info.bitrate;
result["channels"] = info.channels;
}
}
else
result = CVariant(CVariant::VariantTypeNull);
break;
case Picture:
default:
result = CVariant(CVariant::VariantTypeNull);
break;
}
}
else if (property == "audiostreams")
{
result = CVariant(CVariant::VariantTypeArray);
switch (player)
{
case Video:
if (g_application.m_pPlayer->HasPlayer())
{
for (int index = 0; index < g_application.m_pPlayer->GetAudioStreamCount(); index++)
{
SPlayerAudioStreamInfo info;
g_application.m_pPlayer->GetAudioStreamInfo(index, info);
CVariant audioStream(CVariant::VariantTypeObject);
audioStream["index"] = index;
audioStream["name"] = info.name;
audioStream["language"] = info.language;
audioStream["codec"] = info.audioCodecName;
audioStream["bitrate"] = info.bitrate;
audioStream["channels"] = info.channels;
result.append(audioStream);
}
}
break;
case Audio:
case Picture:
default:
break;
}
}
else if (property == "subtitleenabled")
{
switch (player)
{
case Video:
result = g_application.m_pPlayer->GetSubtitleVisible();
break;
case Audio:
case Picture:
default:
result = false;
break;
}
}
else if (property == "currentsubtitle")
{
switch (player)
{
case Video:
if (g_application.m_pPlayer->HasPlayer())
{
result = CVariant(CVariant::VariantTypeObject);
int index = g_application.m_pPlayer->GetSubtitle();
if (index >= 0)
{
SPlayerSubtitleStreamInfo info;
g_application.m_pPlayer->GetSubtitleStreamInfo(index, info);
result["index"] = index;
result["name"] = info.name;
result["language"] = info.language;
}
}
else
result = CVariant(CVariant::VariantTypeNull);
break;
case Audio:
case Picture:
default:
result = CVariant(CVariant::VariantTypeNull);
break;
}
}
else if (property == "subtitles")
{
result = CVariant(CVariant::VariantTypeArray);
switch (player)
{
case Video:
if (g_application.m_pPlayer->HasPlayer())
{
for (int index = 0; index < g_application.m_pPlayer->GetSubtitleCount(); index++)
{
SPlayerSubtitleStreamInfo info;
g_application.m_pPlayer->GetSubtitleStreamInfo(index, info);
CVariant subtitle(CVariant::VariantTypeObject);
subtitle["index"] = index;
subtitle["name"] = info.name;
subtitle["language"] = info.language;
result.append(subtitle);
}
}
break;
case Audio:
case Picture:
default:
break;
}
}
else if (property == "live")
result = IsPVRChannel();
else
return InvalidParams;
return OK;
}
int CPlayerOperations::ParseRepeatState(const CVariant &repeat)
{
REPEAT_STATE state = REPEAT_NONE;
std::string strState = repeat.asString();
if (strState.compare("one") == 0)
state = REPEAT_ONE;
else if (strState.compare("all") == 0)
state = REPEAT_ALL;
return state;
}
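// Converts a time object with optional "hours", "minutes", "seconds" and "milliseconds" members into seconds.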
double CPlayerOperations::ParseTimeInSeconds(const CVariant &time)
{
double seconds = 0.0;
if (time.isObject())
{
if (time.isMember("hours"))
seconds += time["hours"].asInteger() * 60 * 60;
if (time.isMember("minutes"))
seconds += time["minutes"].asInteger() * 60;
if (time.isMember("seconds"))
seconds += time["seconds"].asInteger();
if (time.isMember("milliseconds"))
seconds += time["milliseconds"].asDouble() / 1000.0;
}
return seconds;
}
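// True while live TV or radio playback is active.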
bool CPlayerOperations::IsPVRChannel()
{
return g_PVRManager.IsPlayingTV() || g_PVRManager.IsPlayingRadio();
}
EPG::CEpgInfoTagPtr CPlayerOperations::GetCurrentEpg()
{
if (!g_PVRManager.IsPlayingTV() && !g_PVRManager.IsPlayingRadio())
return EPG::CEpgInfoTagPtr();
CPVRChannelPtr currentChannel(g_PVRManager.GetCurrentChannel());
if (!currentChannel)
return EPG::CEpgInfoTagPtr();
return currentChannel->GetEPGNow();
}
| gpl-2.0 |
DDTChen/CookieVLC | vlc/contrib/android/nettle/testsuite/sha3-224-test.c | 10 | 99582 | #include "testutils.h"
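/* Known-answer tests: each test_hash() call below hashes the given short message
   with nettle_sha3_224 and compares the digest against the expected value. */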
void
test_main(void)
{
/* Extracted from ShortMsgKAT_224.txt using sha3.awk. */
test_hash(&nettle_sha3_224, /* 0 octets */
SHEX(""),
SHEX("F71837502BA8E10837BDD8D365ADB85591895602FC552B48B7390ABD"));
test_hash(&nettle_sha3_224, /* 1 octets */
SHEX("CC"),
SHEX("A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802"));
test_hash(&nettle_sha3_224, /* 2 octets */
SHEX("41FB"),
SHEX("615BA367AFDC35AAC397BC7EB5D58D106A734B24986D5D978FEFD62C"));
test_hash(&nettle_sha3_224, /* 3 octets */
SHEX("1F877C"),
SHEX("6F9D2898EFD096BAAAAAB2E97482DDB6389B8E6CAA964B7A0E347E13"));
test_hash(&nettle_sha3_224, /* 4 octets */
SHEX("C1ECFDFC"),
SHEX("E405869DA1464A705700A3CBCE131AABEEBA9C8D2FE6576B21BCBE16"));
test_hash(&nettle_sha3_224, /* 5 octets */
SHEX("21F134AC57"),
SHEX("5573DA2B02216A860389A581F6E9FB8D805E9E02F6FA911701EEE298"));
test_hash(&nettle_sha3_224, /* 6 octets */
SHEX("C6F50BB74E29"),
SHEX("163C9060163AA66B8B7C0CFAA65D934BFF219BCBC267187CABA0042F"));
test_hash(&nettle_sha3_224, /* 7 octets */
SHEX("119713CC83EEEF"),
SHEX("CFC04C6F8463DDAB24CDF8B8652BD11DF23DD1B95F118328DD01580E"));
test_hash(&nettle_sha3_224, /* 8 octets */
SHEX("4A4F202484512526"),
SHEX("7A5C2CB3F999DD00EFF7399963314CA647DD0E5AE1BDDEC611F8338D"));
test_hash(&nettle_sha3_224, /* 9 octets */
SHEX("1F66AB4185ED9B6375"),
SHEX("A5A75806083AA9307074EF8FBD7DF592985E5F714611E812216C0449"));
test_hash(&nettle_sha3_224, /* 10 octets */
SHEX("EED7422227613B6F53C9"),
SHEX("AC78FC53A1DB90A634F1AAAF90119C889C8C24B59B98B7366029CC73"));
test_hash(&nettle_sha3_224, /* 11 octets */
SHEX("EAEED5CDFFD89DECE455F1"),
SHEX("672CA6826686BEDB258532830D606B258C6DE60154EC0957CD8B858B"));
test_hash(&nettle_sha3_224, /* 12 octets */
SHEX("5BE43C90F22902E4FE8ED2D3"),
SHEX("D98CA07E172B0BC53D679D2F8D002C63FD24A6307F2B7E1EEEF28BE0"));
test_hash(&nettle_sha3_224, /* 13 octets */
SHEX("A746273228122F381C3B46E4F1"),
SHEX("F122BE39C91A6C17CD5900F531E680D54CEDEFD4F0E3D113D26543D4"));
test_hash(&nettle_sha3_224, /* 14 octets */
SHEX("3C5871CD619C69A63B540EB5A625"),
SHEX("2A26D2AD2015C67CABB7895EC5FA25473D4D1433FAE92B9B2CDA31F0"));
test_hash(&nettle_sha3_224, /* 15 octets */
SHEX("FA22874BCC068879E8EF11A69F0722"),
SHEX("A69E4EC1648CBBD595558EE4EA345E4196C2881E85E853739B1F4604"));
test_hash(&nettle_sha3_224, /* 16 octets */
SHEX("52A608AB21CCDD8A4457A57EDE782176"),
SHEX("5679CD509C5120AF54795CF477149641CF27B2EBB6A5F90340704E57"));
test_hash(&nettle_sha3_224, /* 17 octets */
SHEX("82E192E4043DDCD12ECF52969D0F807EED"),
SHEX("455584A1A3BBFBB977AE08DDEE93DA5ACAE0F2F4C3CDAAF089728AAE"));
test_hash(&nettle_sha3_224, /* 18 octets */
SHEX("75683DCB556140C522543BB6E9098B21A21E"),
SHEX("BB779E7267CAF0E891547EE3E3BABF17837671CF731ED56334F61CC3"));
test_hash(&nettle_sha3_224, /* 19 octets */
SHEX("06E4EFE45035E61FAAF4287B4D8D1F12CA97E5"),
SHEX("E7B181DAEC132D3B6C9DFBF61841135B87FB995BE20957B8CD095E2B"));
test_hash(&nettle_sha3_224, /* 20 octets */
SHEX("E26193989D06568FE688E75540AEA06747D9F851"),
SHEX("44729646A05AD0503A876B448F88F177A0A263AB746CA6E30676ADB2"));
test_hash(&nettle_sha3_224, /* 21 octets */
SHEX("D8DC8FDEFBDCE9D44E4CBAFE78447BAE3B5436102A"),
SHEX("05E15793E417DD4E02CD6C5636D42C1638C164D70B79F717F25D1A15"));
test_hash(&nettle_sha3_224, /* 22 octets */
SHEX("57085FD7E14216AB102D8317B0CB338A786D5FC32D8F"),
SHEX("2C4077A8858966EF79AAC3EC6D82855EAD22867BA45D617A68CB926E"));
test_hash(&nettle_sha3_224, /* 23 octets */
SHEX("A05404DF5DBB57697E2C16FA29DEFAC8AB3560D6126FA0"),
SHEX("2E897B479FBCBF42D2139F6768DF147A3B85C36A5B3F3C066EB0565E"));
test_hash(&nettle_sha3_224, /* 24 octets */
SHEX("AECBB02759F7433D6FCB06963C74061CD83B5B3FFA6F13C6"),
SHEX("BA76FFEFD006B81EF5991E697D0425621B16818EA27C11056E00904E"));
test_hash(&nettle_sha3_224, /* 25 octets */
SHEX("AAFDC9243D3D4A096558A360CC27C8D862F0BE73DB5E88AA55"),
SHEX("1C1E758D87399A36BF7C8A2E6A55CE6A4F0C498737956959959FD2AC"));
test_hash(&nettle_sha3_224, /* 26 octets */
SHEX("7BC84867F6F9E9FDC3E1046CAE3A52C77ED485860EE260E30B15"),
SHEX("DDEA76409C61F6D1873F01A34251C74C37B34F28F7F482A84395B5F3"));
test_hash(&nettle_sha3_224, /* 27 octets */
SHEX("FAC523575A99EC48279A7A459E98FF901918A475034327EFB55843"),
SHEX("777C523CF42D0006ED1F88F1BD0C3A5EF21814723794B8461A375C3A"));
test_hash(&nettle_sha3_224, /* 28 octets */
SHEX("0F8B2D8FCFD9D68CFFC17CCFB117709B53D26462A3F346FB7C79B85E"),
SHEX("8D7474ED6DEA4626AD3C1D06D2AD5B198CAAD07B12077C680CF6D89B"));
test_hash(&nettle_sha3_224, /* 29 octets */
SHEX("A963C3E895FF5A0BE4824400518D81412F875FA50521E26E85EAC90C04"),
SHEX("F525D4515D3CA54A2FAB9C679E93561FE151EA0960751352CD7F591A"));
test_hash(&nettle_sha3_224, /* 30 octets */
SHEX("03A18688B10CC0EDF83ADF0A84808A9718383C4070C6C4F295098699AC2C"),
SHEX("9A8455F41F693B91B3DE46BF66FF09D42DC300B856B1DC2DFD12555C"));
test_hash(&nettle_sha3_224, /* 31 octets */
SHEX("84FB51B517DF6C5ACCB5D022F8F28DA09B10232D42320FFC32DBECC3835B29"),
SHEX("81AF3A7A5BD4C1F948D6AF4B96F93C3B0CF9C0E7A6DA6FCD71EEC7F6"));
test_hash(&nettle_sha3_224, /* 32 octets */
SHEX("9F2FCC7C90DE090D6B87CD7E9718C1EA6CB21118FC2D5DE9F97E5DB6AC1E9C10"),
SHEX("A27A051A36A1501974AD8E9873E9DF231AA9AD90EC1D7A8BBF8F639A"));
test_hash(&nettle_sha3_224, /* 33 octets */
SHEX("DE8F1B3FAA4B7040ED4563C3B8E598253178E87E4D0DF75E4FF2F2DEDD5A0BE046"),
SHEX("F217812E362EC64D4DC5EACFABC165184BFA456E5C32C2C7900253D0"));
test_hash(&nettle_sha3_224, /* 34 octets */
SHEX("62F154EC394D0BC757D045C798C8B87A00E0655D0481A7D2D9FB58D93AEDC676B5A0"),
SHEX("5CA92B5F5830E1E5F8DF4391339DF7DF1F23BB31AA05437C103F1652"));
test_hash(&nettle_sha3_224, /* 35 octets */
SHEX("B2DCFE9FF19E2B23CE7DA2A4207D3E5EC7C6112A8A22AEC9675A886378E14E5BFBAD4E"),
SHEX("9F01F07D930F40A26407760104EFD10D4436295F6B8C41FE2A4E09EA"));
test_hash(&nettle_sha3_224, /* 36 octets */
SHEX("47F5697AC8C31409C0868827347A613A3562041C633CF1F1F86865A576E02835ED2C2492"),
SHEX("22A3FED1F4E298C37A1D7BA0C80E994B11D95F290F3945A3CEB2E2E6"));
test_hash(&nettle_sha3_224, /* 37 octets */
SHEX("512A6D292E67ECB2FE486BFE92660953A75484FF4C4F2ECA2B0AF0EDCDD4339C6B2EE4E542"),
SHEX("35F1AB1263211F738D3F97D0E4840C387E09369F23BF9239150D0306"));
test_hash(&nettle_sha3_224, /* 38 octets */
SHEX("973CF2B4DCF0BFA872B41194CB05BB4E16760A1840D8343301802576197EC19E2A1493D8F4FB"),
SHEX("34CC708B874D40478E82324BF3AA32FE9F85AFF8C60B4BADF97003E3"));
test_hash(&nettle_sha3_224, /* 39 octets */
SHEX("80BEEBCD2E3F8A9451D4499961C9731AE667CDC24EA020CE3B9AA4BBC0A7F79E30A934467DA4B0"),
SHEX("5F339B2F87E7F695B236267C819BA1705D97644AD72E0871C7E3A913"));
test_hash(&nettle_sha3_224, /* 40 octets */
SHEX("7ABAA12EC2A7347674E444140AE0FB659D08E1C66DECD8D6EAE925FA451D65F3C0308E29446B8ED3"),
SHEX("8E20D5C83CDA8226B58CEFD74C293CA7579CBB3949CA9EB2F61565B8"));
test_hash(&nettle_sha3_224, /* 41 octets */
SHEX("C88DEE9927679B8AF422ABCBACF283B904FF31E1CAC58C7819809F65D5807D46723B20F67BA610C2B7"),
SHEX("606255348812CFB5082F4D4BB6BBC2FEEF044E381FEB0E346061AA4F"));
test_hash(&nettle_sha3_224, /* 42 octets */
SHEX("01E43FE350FCEC450EC9B102053E6B5D56E09896E0DDD9074FE138E6038210270C834CE6EADC2BB86BF6"),
SHEX("C885274CC3BF110995FEF1154A86772F28B41E745E86E935B4E3A03F"));
test_hash(&nettle_sha3_224, /* 43 octets */
SHEX("337023370A48B62EE43546F17C4EF2BF8D7ECD1D49F90BAB604B839C2E6E5BD21540D29BA27AB8E309A4B7"),
SHEX("EFA7F7E7BFFA6A5E7F7D1C24E7A0A9DC9A6F72B3E9550A0AAA06CCE6"));
test_hash(&nettle_sha3_224, /* 44 octets */
SHEX("6892540F964C8C74BD2DB02C0AD884510CB38AFD4438AF31FC912756F3EFEC6B32B58EBC38FC2A6B913596A8"),
SHEX("ACA7DCCC6B809D511F4C248CAA5D1374E734C1ED6B995760CC3C56D2"));
test_hash(&nettle_sha3_224, /* 45 octets */
SHEX("F5961DFD2B1FFFFDA4FFBF30560C165BFEDAB8CE0BE525845DEB8DC61004B7DB38467205F5DCFB34A2ACFE96C0"),
SHEX("6F1EF55CCC6EF9B68DE54C14448487901022452AB761F84644E9A127"));
test_hash(&nettle_sha3_224, /* 46 octets */
SHEX("CA061A2EB6CEED8881CE2057172D869D73A1951E63D57261384B80CEB5451E77B06CF0F5A0EA15CA907EE1C27EBA"),
SHEX("B297F61FF06021BFE1B9D350B3F54D810BC16ADE17001BAE1B4CD4A2"));
test_hash(&nettle_sha3_224, /* 47 octets */
SHEX("1743A77251D69242750C4F1140532CD3C33F9B5CCDF7514E8584D4A5F9FBD730BCF84D0D4726364B9BF95AB251D9BB"),
SHEX("BE9A75436C3988FB2FE21D0C10EAD9B9C807DE2E13A9BD8437F13332"));
test_hash(&nettle_sha3_224, /* 48 octets */
SHEX("D8FABA1F5194C4DB5F176FABFFF856924EF627A37CD08CF55608BBA8F1E324D7C7F157298EABC4DCE7D89CE5162499F9"),
SHEX("4304582C3892942B1960822C965788B22DE19F1C6D5E204476ADFD26"));
test_hash(&nettle_sha3_224, /* 49 octets */
SHEX("BE9684BE70340860373C9C482BA517E899FC81BAAA12E5C6D7727975D1D41BA8BEF788CDB5CF4606C9C1C7F61AED59F97D"),
SHEX("0480EF8519C32F89C65B8DD450025EC49CBDADA6C4CFCFC6FB4F1C61"));
test_hash(&nettle_sha3_224, /* 50 octets */
SHEX("7E15D2B9EA74CA60F66C8DFAB377D9198B7B16DEB6A1BA0EA3C7EE2042F89D3786E779CF053C77785AA9E692F821F14A7F51"),
SHEX("0BDE9CD50D70F00EED97CCE40C3DF22BB4904C08C4177C3A95985D97"));
test_hash(&nettle_sha3_224, /* 51 octets */
SHEX("9A219BE43713BD578015E9FDA66C0F2D83CAC563B776AB9F38F3E4F7EF229CB443304FBA401EFB2BDBD7ECE939102298651C86"),
SHEX("3BF3ADDB761AB32A38B7B47047AD45B68EDFD88ED475227447EA1B1E"));
test_hash(&nettle_sha3_224, /* 52 octets */
SHEX("C8F2B693BD0D75EF99CAEBDC22ADF4088A95A3542F637203E283BBC3268780E787D68D28CC3897452F6A22AA8573CCEBF245972A"),
SHEX("6182614C8257EB05E9AC0950E15E6044872E5C0AB2AF4540764CA0C8"));
test_hash(&nettle_sha3_224, /* 53 octets */
SHEX("EC0F99711016C6A2A07AD80D16427506CE6F441059FD269442BAAA28C6CA037B22EEAC49D5D894C0BF66219F2C08E9D0E8AB21DE52"),
SHEX("0B5DC722EEA2C348325FD9B3D7F08F365B71D5B582C27BEB79B51D5D"));
test_hash(&nettle_sha3_224, /* 54 octets */
SHEX("0DC45181337CA32A8222FE7A3BF42FC9F89744259CFF653504D6051FE84B1A7FFD20CB47D4696CE212A686BB9BE9A8AB1C697B6D6A33"),
SHEX("29C2B817C75B6417BC89C262AF9D58F0C18FBD991F59F4181F237038"));
test_hash(&nettle_sha3_224, /* 55 octets */
SHEX("DE286BA4206E8B005714F80FB1CDFAEBDE91D29F84603E4A3EBC04686F99A46C9E880B96C574825582E8812A26E5A857FFC6579F63742F"),
SHEX("62C5876694D88007709B50900EE2E6CA9505CC90067EFBF4C1D95B0B"));
test_hash(&nettle_sha3_224, /* 56 octets */
SHEX("EEBCC18057252CBF3F9C070F1A73213356D5D4BC19AC2A411EC8CDEEE7A571E2E20EAF61FD0C33A0FFEB297DDB77A97F0A415347DB66BCAF"),
SHEX("D362BE7896B2AC3CA4DC3161B7F6C5B3FBE65F32D040402B8D306B15"));
test_hash(&nettle_sha3_224, /* 57 octets */
SHEX("416B5CDC9FE951BD361BD7ABFC120A5054758EBA88FDD68FD84E39D3B09AC25497D36B43CBE7B85A6A3CEBDA8DB4E5549C3EE51BB6FCB6AC1E"),
SHEX("D420C7BDF8D86D7B1CBD1AF7868EBC4FF17245595B94959A0714333C"));
test_hash(&nettle_sha3_224, /* 58 octets */
SHEX("5C5FAF66F32E0F8311C32E8DA8284A4ED60891A5A7E50FB2956B3CBAA79FC66CA376460E100415401FC2B8518C64502F187EA14BFC9503759705"),
SHEX("2E04DAE6E3FDF2A47FF40E6F3E61B371F3E51A5864A31CC11D127620"));
test_hash(&nettle_sha3_224, /* 59 octets */
SHEX("7167E1E02BE1A7CA69D788666F823AE4EEF39271F3C26A5CF7CEE05BCA83161066DC2E217B330DF821103799DF6D74810EED363ADC4AB99F36046A"),
SHEX("22817A21CFCEC4FD2348B6BE8A7042A37754D76A3F33A8F818312CC7"));
test_hash(&nettle_sha3_224, /* 60 octets */
SHEX("2FDA311DBBA27321C5329510FAE6948F03210B76D43E7448D1689A063877B6D14C4F6D0EAA96C150051371F7DD8A4119F7DA5C483CC3E6723C01FB7D"),
SHEX("68CAF2203317A8BED30C1792E888910124F2F0EE1D24D47274BCC856"));
test_hash(&nettle_sha3_224, /* 61 octets */
SHEX("95D1474A5AAB5D2422ACA6E481187833A6212BD2D0F91451A67DD786DFC91DFED51B35F47E1DEB8A8AB4B9CB67B70179CC26F553AE7B569969CE151B8D"),
SHEX("7BBAC0C0F192D2C479348358D2247E4C08966A512F73D40445B52EC7"));
test_hash(&nettle_sha3_224, /* 62 octets */
SHEX("C71BD7941F41DF044A2927A8FF55B4B467C33D089F0988AA253D294ADDBDB32530C0D4208B10D9959823F0C0F0734684006DF79F7099870F6BF53211A88D"),
SHEX("D226D9E1F36EC4222693699B6D0383C1452E391C41EFD7645289F8E3"));
test_hash(&nettle_sha3_224, /* 63 octets */
SHEX("F57C64006D9EA761892E145C99DF1B24640883DA79D9ED5262859DCDA8C3C32E05B03D984F1AB4A230242AB6B78D368DC5AAA1E6D3498D53371E84B0C1D4BA"),
SHEX("294A1E5A0629A2736F188691A35FE1ABB55472785DAFF6CD88C6D537"));
test_hash(&nettle_sha3_224, /* 64 octets */
SHEX("E926AE8B0AF6E53176DBFFCC2A6B88C6BD765F939D3D178A9BDE9EF3AA131C61E31C1E42CDFAF4B4DCDE579A37E150EFBEF5555B4C1CB40439D835A724E2FAE7"),
SHEX("C533DCF88CD1A5DFF22B914D3875BD57FC17B2E1F474AE360C3877D2"));
test_hash(&nettle_sha3_224, /* 65 octets */
SHEX("16E8B3D8F988E9BB04DE9C96F2627811C973CE4A5296B4772CA3EEFEB80A652BDF21F50DF79F32DB23F9F73D393B2D57D9A0297F7A2F2E79CFDA39FA393DF1AC00"),
SHEX("C9B7AD7A32B70DFB5A8A2FF9D98B300E484B996ED752A732D84DB6F7"));
test_hash(&nettle_sha3_224, /* 66 octets */
SHEX("FC424EEB27C18A11C01F39C555D8B78A805B88DBA1DC2A42ED5E2C0EC737FF68B2456D80EB85E11714FA3F8EABFB906D3C17964CB4F5E76B29C1765DB03D91BE37FC"),
SHEX("CF646D5E5C81818C97A01F393F8033CE3CB7CCD07FDAC9988766BD1C"));
test_hash(&nettle_sha3_224, /* 67 octets */
SHEX("ABE3472B54E72734BDBA7D9158736464251C4F21B33FBBC92D7FAC9A35C4E3322FF01D2380CBAA4EF8FB07D21A2128B7B9F5B6D9F34E13F39C7FFC2E72E47888599BA5"),
SHEX("D411E8A7CF50AAF91076A8CC5F01BF5B6BB2CCAE8046BF47871891FD"));
test_hash(&nettle_sha3_224, /* 68 octets */
SHEX("36F9F0A65F2CA498D739B944D6EFF3DA5EBBA57E7D9C41598A2B0E4380F3CF4B479EC2348D015FFE6256273511154AFCF3B4B4BF09D6C4744FDD0F62D75079D440706B05"),
SHEX("E094C0303D1841C6E4C0864857CF36CFC980E3CB4D78F18E301117C4"));
test_hash(&nettle_sha3_224, /* 69 octets */
SHEX("ABC87763CAE1CA98BD8C5B82CABA54AC83286F87E9610128AE4DE68AC95DF5E329C360717BD349F26B872528492CA7C94C2C1E1EF56B74DBB65C2AC351981FDB31D06C77A4"),
SHEX("51948E1772C2C2EE49158D02A975B27477BD041262954C3E60F5ACC2"));
test_hash(&nettle_sha3_224, /* 70 octets */
SHEX("94F7CA8E1A54234C6D53CC734BB3D3150C8BA8C5F880EAB8D25FED13793A9701EBE320509286FD8E422E931D99C98DA4DF7E70AE447BAB8CFFD92382D8A77760A259FC4FBD72"),
SHEX("8214A2B0E8BB60CD3E4DFB0D0855D0F6C4BA6D2728D0687BDF75F79E"));
test_hash(&nettle_sha3_224, /* 71 octets */
SHEX("13BD2811F6ED2B6F04FF3895ACEED7BEF8DCD45EB121791BC194A0F806206BFFC3B9281C2B308B1A729CE008119DD3066E9378ACDCC50A98A82E20738800B6CDDBE5FE9694AD6D"),
SHEX("8A2AE6B9AA7B1E08F8C7DC3BF5AE876660D30F79391714A175381091"));
test_hash(&nettle_sha3_224, /* 72 octets */
SHEX("1EED9CBA179A009EC2EC5508773DD305477CA117E6D569E66B5F64C6BC64801CE25A8424CE4A26D575B8A6FB10EAD3FD1992EDDDEEC2EBE7150DC98F63ADC3237EF57B91397AA8A7"),
SHEX("702B1906A63D0F924AFEC3BB5E5C5742E85F9834EA6F5306644811A1"));
test_hash(&nettle_sha3_224, /* 73 octets */
SHEX("BA5B67B5EC3A3FFAE2C19DD8176A2EF75C0CD903725D45C9CB7009A900C0B0CA7A2967A95AE68269A6DBF8466C7B6844A1D608AC661F7EFF00538E323DB5F2C644B78B2D48DE1A08AA"),
SHEX("BF2101511220B7DFE54B127C2476EAADFD4EAB7FD0F6BDD193078AC8"));
test_hash(&nettle_sha3_224, /* 74 octets */
SHEX("0EFA26AC5673167DCACAB860932ED612F65FF49B80FA9AE65465E5542CB62075DF1C5AE54FBA4DB807BE25B070033EFA223BDD5B1D3C94C6E1909C02B620D4B1B3A6C9FED24D70749604"),
SHEX("B07ADBED912723A07FA5353F665EC14FF82D85E90BE3E5A1F5C90FFF"));
test_hash(&nettle_sha3_224, /* 75 octets */
SHEX("BBFD933D1FD7BF594AC7F435277DC17D8D5A5B8E4D13D96D2F64E771ABBD51A5A8AEA741BECCBDDB177BCEA05243EBD003CFDEAE877CCA4DA94605B67691919D8B033F77D384CA01593C1B"),
SHEX("D1718F0D387AC427111A7E90E575DE5F04778EA2BA147A8451914FF0"));
test_hash(&nettle_sha3_224, /* 76 octets */
SHEX("90078999FD3C35B8AFBF4066CBDE335891365F0FC75C1286CDD88FA51FAB94F9B8DEF7C9AC582A5DBCD95817AFB7D1B48F63704E19C2BAA4DF347F48D4A6D603013C23F1E9611D595EBAC37C"),
SHEX("FAF7D793024E6D05E77C5231712478822C915292FCC1427E6ACFD3CF"));
test_hash(&nettle_sha3_224, /* 77 octets */
SHEX("64105ECA863515C20E7CFBAA0A0B8809046164F374D691CDBD6508AAABC1819F9AC84B52BAFC1B0FE7CDDBC554B608C01C8904C669D8DB316A0953A4C68ECE324EC5A49FFDB59A1BD6A292AA0E"),
SHEX("A375D756A8F39C72F67CA489C95F99350FFD0515B151A3BFF288CAAA"));
test_hash(&nettle_sha3_224, /* 78 octets */
SHEX("D4654BE288B9F3B711C2D02015978A8CC57471D5680A092AA534F7372C71CEAAB725A383C4FCF4D8DEAA57FCA3CE056F312961ECCF9B86F14981BA5BED6AB5B4498E1F6C82C6CAE6FC14845B3C8A"),
SHEX("1BD1B6F3144A3DEE93DEA1DF03C0E958F485B8AE164DCEE55F973413"));
test_hash(&nettle_sha3_224, /* 79 octets */
SHEX("12D9394888305AC96E65F2BF0E1B18C29C90FE9D714DD59F651F52B88B3008C588435548066EA2FC4C101118C91F32556224A540DE6EFDDBCA296EF1FB00341F5B01FECFC146BDB251B3BDAD556CD2"),
SHEX("BE88B495D0CD90281AF2094B8D7E72EB417288CA16F751C09694B682"));
test_hash(&nettle_sha3_224, /* 80 octets */
SHEX("871A0D7A5F36C3DA1DFCE57ACD8AB8487C274FAD336BC137EBD6FF4658B547C1DCFAB65F037AA58F35EF16AFF4ABE77BA61F65826F7BE681B5B6D5A1EA8085E2AE9CD5CF0991878A311B549A6D6AF230"),
SHEX("7DAC046254808464024617D63A038267FE2CA65052BDEB569A0A9C15"));
test_hash(&nettle_sha3_224, /* 81 octets */
SHEX("E90B4FFEF4D457BC7711FF4AA72231CA25AF6B2E206F8BF859D8758B89A7CD36105DB2538D06DA83BAD5F663BA11A5F6F61F236FD5F8D53C5E89F183A3CEC615B50C7C681E773D109FF7491B5CC22296C5"),
SHEX("89F6B320EFABE42CE13C9E20E4829F31A7848EEE3FC854E603FBD46F"));
test_hash(&nettle_sha3_224, /* 82 octets */
SHEX("E728DE62D75856500C4C77A428612CD804F30C3F10D36FB219C5CA0AA30726AB190E5F3F279E0733D77E7267C17BE27D21650A9A4D1E32F649627638DBADA9702C7CA303269ED14014B2F3CF8B894EAC8554"),
SHEX("A805DBD3B8DF5E03E05EFFFDE1B94B35A23C5D77C2797D984E56656F"));
test_hash(&nettle_sha3_224, /* 83 octets */
SHEX("6348F229E7B1DF3B770C77544E5166E081850FA1C6C88169DB74C76E42EB983FACB276AD6A0D1FA7B50D3E3B6FCD799EC97470920A7ABED47D288FF883E24CA21C7F8016B93BB9B9E078BDB9703D2B781B616E"),
SHEX("F05742CC1DB422A3113AC49602E8D0DD6CB472E7ED26BCE40BBA09BD"));
test_hash(&nettle_sha3_224, /* 84 octets */
SHEX("4B127FDE5DE733A1680C2790363627E63AC8A3F1B4707D982CAEA258655D9BF18F89AFE54127482BA01E08845594B671306A025C9A5C5B6F93B0A39522DC877437BE5C2436CBF300CE7AB6747934FCFC30AEAAF6"),
SHEX("45945F867B7E1E75EE496E0FC4AAFF71A0CC539841D153439AED4DFC"));
test_hash(&nettle_sha3_224, /* 85 octets */
SHEX("08461F006CFF4CC64B752C957287E5A0FAABC05C9BFF89D23FD902D324C79903B48FCB8F8F4B01F3E4DDB483593D25F000386698F5ADE7FAADE9615FDC50D32785EA51D49894E45BAA3DC707E224688C6408B68B11"),
SHEX("5A8AC7533E1354068B564CCD214EB2A2E097DD60E08BD69FC782B0AF"));
test_hash(&nettle_sha3_224, /* 86 octets */
SHEX("68C8F8849B120E6E0C9969A5866AF591A829B92F33CD9A4A3196957A148C49138E1E2F5C7619A6D5EDEBE995ACD81EC8BB9C7B9CFCA678D081EA9E25A75D39DB04E18D475920CE828B94E72241F24DB72546B352A0E4"),
SHEX("059F7EB983362FD44E94E2BFD59CCED43CAE959C9A483EBD5E6E2036"));
test_hash(&nettle_sha3_224, /* 87 octets */
SHEX("B8D56472954E31FB54E28FCA743F84D8DC34891CB564C64B08F7B71636DEBD64CA1EDBDBA7FC5C3E40049CE982BBA8C7E0703034E331384695E9DE76B5104F2FBC4535ECBEEBC33BC27F29F18F6F27E8023B0FBB6F563C"),
SHEX("22D62AD272FEFC89F73256EAACE00C7B8E998FB322C8EB67DC1EAC6A"));
test_hash(&nettle_sha3_224, /* 88 octets */
SHEX("0D58AC665FA84342E60CEFEE31B1A4EACDB092F122DFC68309077AED1F3E528F578859EE9E4CEFB4A728E946324927B675CD4F4AC84F64DB3DACFE850C1DD18744C74CECCD9FE4DC214085108F404EAB6D8F452B5442A47D"),
SHEX("A396EA905EB612554BD00E4FC1BB4C5247D73FDE4BBAF5380ED42DD0"));
test_hash(&nettle_sha3_224, /* 89 octets */
SHEX("1755E2D2E5D1C1B0156456B539753FF416651D44698E87002DCF61DCFA2B4E72F264D9AD591DF1FDEE7B41B2EB00283C5AEBB3411323B672EAA145C5125185104F20F335804B02325B6DEA65603F349F4D5D8B782DD3469CCD"),
SHEX("D8B5B24B9E92326FDE5DB1058EEDBEEDB0B65982925734B6E2844036"));
test_hash(&nettle_sha3_224, /* 90 octets */
SHEX("B180DE1A611111EE7584BA2C4B020598CD574AC77E404E853D15A101C6F5A2E5C801D7D85DC95286A1804C870BB9F00FD4DCB03AA8328275158819DCAD7253F3E3D237AEAA7979268A5DB1C6CE08A9EC7C2579783C8AFC1F91A7"),
SHEX("FDB9015B20DB446F79575E6B8C73A98EAC731CFE2E59BD46DBDA0E35"));
test_hash(&nettle_sha3_224, /* 91 octets */
SHEX("CF3583CBDFD4CBC17063B1E7D90B02F0E6E2EE05F99D77E24E560392535E47E05077157F96813544A17046914F9EFB64762A23CF7A49FE52A0A4C01C630CFE8727B81FB99A89FF7CC11DCA5173057E0417B8FE7A9EFBA6D95C555F"),
SHEX("DF1B47E73E8CBD2CA852CF58AD68B5F8BAA1169C0795961041E8A918"));
test_hash(&nettle_sha3_224, /* 92 octets */
SHEX("072FC02340EF99115BAD72F92C01E4C093B9599F6CFC45CB380EE686CB5EB019E806AB9BD55E634AB10AA62A9510CC0672CD3EDDB589C7DF2B67FCD3329F61B1A4441ECA87A33C8F55DA4FBBAD5CF2B2527B8E983BB31A2FADEC7523"),
SHEX("1E8A90918D6EAD31E446D4EE2673871ECC5C7DA9B18ED511E1632E0D"));
test_hash(&nettle_sha3_224, /* 93 octets */
SHEX("76EECF956A52649F877528146DE33DF249CD800E21830F65E90F0F25CA9D6540FDE40603230ECA6760F1139C7F268DEBA2060631EEA92B1FFF05F93FD5572FBE29579ECD48BC3A8D6C2EB4A6B26E38D6C5FBF2C08044AEEA470A8F2F26"),
SHEX("1060AFD1E1B9F7F41291A4861774B3B0C95A812788A41D7EBEF4A893"));
test_hash(&nettle_sha3_224, /* 94 octets */
SHEX("7ADC0B6693E61C269F278E6944A5A2D8300981E40022F839AC644387BFAC9086650085C2CDC585FEA47B9D2E52D65A2B29A7DC370401EF5D60DD0D21F9E2B90FAE919319B14B8C5565B0423CEFB827D5F1203302A9D01523498A4DB10374"),
SHEX("EA91EDC393491B4CBC035B8538DF08E3C6B8CAD18338053C81FE2E08"));
test_hash(&nettle_sha3_224, /* 95 octets */
SHEX("E1FFFA9826CCE8B86BCCEFB8794E48C46CDF372013F782ECED1E378269B7BE2B7BF51374092261AE120E822BE685F2E7A83664BCFBE38FE8633F24E633FFE1988E1BC5ACF59A587079A57A910BDA60060E85B5F5B6F776F0529639D9CCE4BD"),
SHEX("DF1AF149E5C92CB29174C1EDB6ED891EBCE4366010DC7CBFC9B1D757"));
test_hash(&nettle_sha3_224, /* 96 octets */
SHEX("69F9ABBA65592EE01DB4DCE52DBAB90B08FC04193602792EE4DAA263033D59081587B09BBE49D0B49C9825D22840B2FF5D9C5155F975F8F2C2E7A90C75D2E4A8040FE39F63BBAFB403D9E28CC3B86E04E394A9C9E8065BD3C85FA9F0C7891600"),
SHEX("5F698408BFF0246B05BAD96CB342B2FD2F11B6804EF2FA07A81B0920"));
test_hash(&nettle_sha3_224, /* 97 octets */
SHEX("38A10A352CA5AEDFA8E19C64787D8E9C3A75DBF3B8674BFAB29B5DBFC15A63D10FAE66CD1A6E6D2452D557967EAAD89A4C98449787B0B3164CA5B717A93F24EB0B506CEB70CBBCB8D72B2A72993F909AAD92F044E0B5A2C9AC9CB16A0CA2F81F49"),
SHEX("EBE6D61E8A946E0D45D3889F9E360ACD3A1A7D6C4B1307448E6E7357"));
test_hash(&nettle_sha3_224, /* 98 octets */
SHEX("6D8C6E449BC13634F115749C248C17CD148B72157A2C37BF8969EA83B4D6BA8C0EE2711C28EE11495F43049596520CE436004B026B6C1F7292B9C436B055CBB72D530D860D1276A1502A5140E3C3F54A93663E4D20EDEC32D284E25564F624955B52"),
SHEX("1B7F6BCB2271AC9C3B558E95F85285EE756B03B767A01AC57D7C6E94"));
test_hash(&nettle_sha3_224, /* 99 octets */
SHEX("6EFCBCAF451C129DBE00B9CEF0C3749D3EE9D41C7BD500ADE40CDC65DEDBBBADB885A5B14B32A0C0D087825201E303288A733842FA7E599C0C514E078F05C821C7A4498B01C40032E9F1872A1C925FA17CE253E8935E4C3C71282242CB716B2089CCC1"),
SHEX("436D1BCD6B3DE2677A72B93E2CEDB60C84A4FE125A802E2997EB2E67"));
test_hash(&nettle_sha3_224, /* 100 octets */
SHEX("433C5303131624C0021D868A30825475E8D0BD3052A022180398F4CA4423B98214B6BEAAC21C8807A2C33F8C93BD42B092CC1B06CEDF3224D5ED1EC29784444F22E08A55AA58542B524B02CD3D5D5F6907AFE71C5D7462224A3F9D9E53E7E0846DCBB4CE"),
SHEX("62B10F1B6236EBC2DA72957742A8D4E48E213B5F8934604BFD4D2C3A"));
test_hash(&nettle_sha3_224, /* 101 octets */
SHEX("A873E0C67CA639026B6683008F7AA6324D4979550E9BCE064CA1E1FB97A30B147A24F3F666C0A72D71348EDE701CF2D17E2253C34D1EC3B647DBCEF2F879F4EB881C4830B791378C901EB725EA5C172316C6D606E0AF7DF4DF7F76E490CD30B2BADF45685F"),
SHEX("1186BEA0880D0A96F6A56BBB431F4D264838BB0180DCF66EF0B599CA"));
test_hash(&nettle_sha3_224, /* 102 octets */
SHEX("006917B64F9DCDF1D2D87C8A6173B64F6587168E80FAA80F82D84F60301E561E312D9FBCE62F39A6FB476E01E925F26BCC91DE621449BE6504C504830AAE394096C8FC7694651051365D4EE9070101EC9B68086F2EA8F8AB7B811EA8AD934D5C9B62C60A4771"),
SHEX("383D2F41ECFDA5994E815432999D192E1A282FF5663196A4A268A67D"));
test_hash(&nettle_sha3_224, /* 103 octets */
SHEX("F13C972C52CB3CC4A4DF28C97F2DF11CE089B815466BE88863243EB318C2ADB1A417CB1041308598541720197B9B1CB5BA2318BD5574D1DF2174AF14884149BA9B2F446D609DF240CE335599957B8EC80876D9A085AE084907BC5961B20BF5F6CA58D5DAB38ADB"),
SHEX("E2594A633B2DC671FD0DDFD3BF7238332C425520827C524FB0E19778"));
test_hash(&nettle_sha3_224, /* 104 octets */
SHEX("E35780EB9799AD4C77535D4DDB683CF33EF367715327CF4C4A58ED9CBDCDD486F669F80189D549A9364FA82A51A52654EC721BB3AAB95DCEB4A86A6AFA93826DB923517E928F33E3FBA850D45660EF83B9876ACCAFA2A9987A254B137C6E140A21691E1069413848"),
SHEX("234764AAE8C39B1571D7741BB176FF86246070EC9AC97A1B2EB35472"));
test_hash(&nettle_sha3_224, /* 105 octets */
SHEX("64EC021C9585E01FFE6D31BB50D44C79B6993D72678163DB474947A053674619D158016ADB243F5C8D50AA92F50AB36E579FF2DABB780A2B529370DAA299207CFBCDD3A9A25006D19C4F1FE33E4B1EAEC315D8C6EE1E730623FD1941875B924EB57D6D0C2EDC4E78D6"),
SHEX("A634D7EBAA2BC0043EB5E237690E38FF1E05EE5A042882A233A2D92A"));
test_hash(&nettle_sha3_224, /* 106 octets */
SHEX("5954BAB512CF327D66B5D9F296180080402624AD7628506B555EEA8382562324CF452FBA4A2130DE3E165D11831A270D9CB97CE8C2D32A96F50D71600BB4CA268CF98E90D6496B0A6619A5A8C63DB6D8A0634DFC6C7EC8EA9C006B6C456F1B20CD19E781AF20454AC880"),
SHEX("EF03FBB1EF3296EECFB98909E416D113B5741E44962EC57993C6DA5D"));
test_hash(&nettle_sha3_224, /* 107 octets */
SHEX("03D9F92B2C565709A568724A0AFF90F8F347F43B02338F94A03ED32E6F33666FF5802DA4C81BDCE0D0E86C04AFD4EDC2FC8B4141C2975B6F07639B1994C973D9A9AFCE3D9D365862003498513BFA166D2629E314D97441667B007414E739D7FEBF0FE3C32C17AA188A8683"),
SHEX("210D245CC8B5A7B4C1B118A9890ECDDC34A66EA92805B7A7C19A944A"));
test_hash(&nettle_sha3_224, /* 108 octets */
SHEX("F31E8B4F9E0621D531D22A380BE5D9ABD56FAEC53CBD39B1FAB230EA67184440E5B1D15457BD25F56204FA917FA48E669016CB48C1FFC1E1E45274B3B47379E00A43843CF8601A5551411EC12503E5AAC43D8676A1B2297EC7A0800DBFEE04292E937F21C005F17411473041"),
SHEX("517BAE010715A020435CFDB531B856C5704E0EC611360F60D5B76161"));
test_hash(&nettle_sha3_224, /* 109 octets */
SHEX("758EA3FEA738973DB0B8BE7E599BBEF4519373D6E6DCD7195EA885FC991D896762992759C2A09002912FB08E0CB5B76F49162AEB8CF87B172CF3AD190253DF612F77B1F0C532E3B5FC99C2D31F8F65011695A087A35EE4EEE5E334C369D8EE5D29F695815D866DA99DF3F79403"),
SHEX("79D478B4BC5E6FC2D406BB1C3834A5CE397A88E80135F55D8FE32C5E"));
test_hash(&nettle_sha3_224, /* 110 octets */
SHEX("47C6E0C2B74948465921868804F0F7BD50DD323583DC784F998A93CD1CA4C6EF84D41DC81C2C40F34B5BEE6A93867B3BDBA0052C5F59E6F3657918C382E771D33109122CC8BB0E1E53C4E3D13B43CE44970F5E0C079D2AD7D7A3549CD75760C21BB15B447589E86E8D76B1E9CED2"),
SHEX("F7BA7A56AFC1C58E62841C3B98F5677199F24B534B0D52D9A5C95495"));
test_hash(&nettle_sha3_224, /* 111 octets */
SHEX("F690A132AB46B28EDFA6479283D6444E371C6459108AFD9C35DBD235E0B6B6FF4C4EA58E7554BD002460433B2164CA51E868F7947D7D7A0D792E4ABF0BE5F450853CC40D85485B2B8857EA31B5EA6E4CCFA2F3A7EF3380066D7D8979FDAC618AAD3D7E886DEA4F005AE4AD05E5065F"),
SHEX("78A90B769E9A326C93D5A6A6105AEE031DCEB2C8D222B36E02F27DB6"));
test_hash(&nettle_sha3_224, /* 112 octets */
SHEX("58D6A99BC6458824B256916770A8417040721CCCFD4B79EACD8B65A3767CE5BA7E74104C985AC56B8CC9AEBD16FEBD4CDA5ADB130B0FF2329CC8D611EB14DAC268A2F9E633C99DE33997FEA41C52A7C5E1317D5B5DAED35EBA7D5A60E45D1FA7EAABC35F5C2B0A0F2379231953322C4E"),
SHEX("3D9D5C2FD2F60F4BB89E11FD3BC2FBD65602EB3F3F38D6FA03BDCE2C"));
test_hash(&nettle_sha3_224, /* 113 octets */
SHEX("BEFAB574396D7F8B6705E2D5B58B2C1C820BB24E3F4BAE3E8FBCD36DBF734EE14E5D6AB972AEDD3540235466E825850EE4C512EA9795ABFD33F330D9FD7F79E62BBB63A6EA85DE15BEAEEA6F8D204A28956059E2632D11861DFB0E65BC07AC8A159388D5C3277E227286F65FF5E5B5AEC1"),
SHEX("E1FABE16152560387FADAD3324CBB94D8AF968786C3C994C8F926D32"));
test_hash(&nettle_sha3_224, /* 114 octets */
SHEX("8E58144FA9179D686478622CE450C748260C95D1BA43B8F9B59ABECA8D93488DA73463EF40198B4D16FB0B0707201347E0506FF19D01BEA0F42B8AF9E71A1F1BD168781069D4D338FDEF00BF419FBB003031DF671F4A37979564F69282DE9C65407847DD0DA505AB1641C02DEA4F0D834986"),
SHEX("CEF84F1966215B1511F5E0DB564D6827898184FBCB88BE0213FC563F"));
test_hash(&nettle_sha3_224, /* 115 octets */
SHEX("B55C10EAE0EC684C16D13463F29291BF26C82E2FA0422A99C71DB4AF14DD9C7F33EDA52FD73D017CC0F2DBE734D831F0D820D06D5F89DACC485739144F8CFD4799223B1AFF9031A105CB6A029BA71E6E5867D85A554991C38DF3C9EF8C1E1E9A7630BE61CAABCA69280C399C1FB7A12D12AEFC"),
SHEX("8E4B5A2B79FC1E7D0526AACB5B9AC01A569635644C9249DFFEE3B927"));
test_hash(&nettle_sha3_224, /* 116 octets */
SHEX("2EEEA693F585F4ED6F6F8865BBAE47A6908AECD7C429E4BEC4F0DE1D0CA0183FA201A0CB14A529B7D7AC0E6FF6607A3243EE9FB11BCF3E2304FE75FFCDDD6C5C2E2A4CD45F63C962D010645058D36571404A6D2B4F44755434D76998E83409C3205AA1615DB44057DB991231D2CB42624574F545"),
SHEX("BAFF55CDAD66AA77AD677E13A138B2F17286B504EA6B94EFFD9D9A95"));
test_hash(&nettle_sha3_224, /* 117 octets */
SHEX("DAB11DC0B047DB0420A585F56C42D93175562852428499F66A0DB811FCDDDAB2F7CDFFED1543E5FB72110B64686BC7B6887A538AD44C050F1E42631BC4EC8A9F2A047163D822A38989EE4AAB01B4C1F161B062D873B1CFA388FD301514F62224157B9BEF423C7783B7AAC8D30D65CD1BBA8D689C2D"),
SHEX("B4EFBE1167755F5A75B72CF15E0601662D036A16CAC8602A909FB328"));
test_hash(&nettle_sha3_224, /* 118 octets */
SHEX("42E99A2F80AEE0E001279A2434F731E01D34A44B1A8101726921C0590C30F3120EB83059F325E894A5AC959DCA71CE2214799916424E859D27D789437B9D27240BF8C35ADBAFCECC322B48AA205B293962D858652ABACBD588BCF6CBC388D0993BD622F96ED54614C25B6A9AA527589EAAFFCF17DDF7"),
SHEX("FA4BB608F8F60841E1189F8770051695CDC9935BDA7187C36419228A"));
test_hash(&nettle_sha3_224, /* 119 octets */
SHEX("3C9B46450C0F2CAE8E3823F8BDB4277F31B744CE2EB17054BDDC6DFF36AF7F49FB8A2320CC3BDF8E0A2EA29AD3A55DE1165D219ADEDDB5175253E2D1489E9B6FDD02E2C3D3A4B54D60E3A47334C37913C5695378A669E9B72DEC32AF5434F93F46176EBF044C4784467C700470D0C0B40C8A088C815816"),
SHEX("B3A877231519C24E2EFA424E6057128EA105B54C65E58074B5B1583C"));
test_hash(&nettle_sha3_224, /* 120 octets */
SHEX("D1E654B77CB155F5C77971A64DF9E5D34C26A3CAD6C7F6B300D39DEB1910094691ADAA095BE4BA5D86690A976428635D5526F3E946F7DC3BD4DBC78999E653441187A81F9ADCD5A3C5F254BC8256B0158F54673DCC1232F6E918EBFC6C51CE67EAEB042D9F57EEC4BFE910E169AF78B3DE48D137DF4F2840"),
SHEX("9F385C0B645DB8DB8B73C98C40BE264FFEE6151C7B5A0964E67DAA9F"));
test_hash(&nettle_sha3_224, /* 121 octets */
SHEX("626F68C18A69A6590159A9C46BE03D5965698F2DAC3DE779B878B3D9C421E0F21B955A16C715C1EC1E22CE3EB645B8B4F263F60660EA3028981EEBD6C8C3A367285B691C8EE56944A7CD1217997E1D9C21620B536BDBD5DE8925FF71DEC6FBC06624AB6B21E329813DE90D1E572DFB89A18120C3F606355D25"),
SHEX("BD6C865993082EC7B3808C13FD140FE0C0667B3EE51B9F8F1F4DFFD8"));
test_hash(&nettle_sha3_224, /* 122 octets */
SHEX("651A6FB3C4B80C7C68C6011675E6094EB56ABF5FC3057324EBC6477825061F9F27E7A94633ABD1FA598A746E4A577CAF524C52EC1788471F92B8C37F23795CA19D559D446CAB16CBCDCE90B79FA1026CEE77BF4AB1B503C5B94C2256AD75B3EAC6FD5DCB96ACA4B03A834BFB4E9AF988CECBF2AE597CB9097940"),
SHEX("8CA844ACFCAABD3B969F86C2F1ECDF1620574EC8C24426BE2DCC1BB5"));
test_hash(&nettle_sha3_224, /* 123 octets */
SHEX("8AAF072FCE8A2D96BC10B3C91C809EE93072FB205CA7F10ABD82ECD82CF040B1BC49EA13D1857815C0E99781DE3ADBB5443CE1C897E55188CEAF221AA9681638DE05AE1B322938F46BCE51543B57ECDB4C266272259D1798DE13BE90E10EFEC2D07484D9B21A3870E2AA9E06C21AA2D0C9CF420080A80A91DEE16F"),
SHEX("E8D549FF8D53745A4C5C75BDAD92314025DA877A77CE49EA134840FA"));
test_hash(&nettle_sha3_224, /* 124 octets */
SHEX("53F918FD00B1701BD504F8CDEA803ACCA21AC18C564AB90C2A17DA592C7D69688F6580575395551E8CD33E0FEF08CA6ED4588D4D140B3E44C032355DF1C531564D7F4835753344345A6781E11CD5E095B73DF5F82C8AE3AD00877936896671E947CC52E2B29DCD463D90A0C9929128DA222B5A211450BBC0E02448E2"),
SHEX("E6BD80787F8704FFF73112E8B368ADFBA3A1109162C769491349DCEF"));
test_hash(&nettle_sha3_224, /* 125 octets */
SHEX("A64599B8A61B5CCEC9E67AED69447459C8DA3D1EC6C7C7C82A7428B9B584FA67E90F68E2C00FBBED4613666E5168DA4A16F395F7A3C3832B3B134BFC9CBAA95D2A0FE252F44AC6681EB6D40AB91C1D0282FED6701C57463D3C5F2BB8C6A7301FB4576AA3B5F15510DB8956FF77478C26A7C09BEA7B398CFC83503F538E"),
SHEX("BD7D9E6CF9D2C1030F892533E01B72B5288E174B0864D81D71F8C6E6"));
test_hash(&nettle_sha3_224, /* 126 octets */
SHEX("0E3AB0E054739B00CDB6A87BD12CAE024B54CB5E550E6C425360C2E87E59401F5EC24EF0314855F0F56C47695D56A7FB1417693AF2A1ED5291F2FEE95F75EED54A1B1C2E81226FBFF6F63ADE584911C71967A8EB70933BC3F5D15BC91B5C2644D9516D3C3A8C154EE48E118BD1442C043C7A0DBA5AC5B1D5360AAE5B9065"),
SHEX("A5312E8C7F0A3594A8ECD1ABC5CBC14B2585F0B1FE32A4E1FA0A2E25"));
test_hash(&nettle_sha3_224, /* 127 octets */
SHEX("A62FC595B4096E6336E53FCDFC8D1CC175D71DAC9D750A6133D23199EAAC288207944CEA6B16D27631915B4619F743DA2E30A0C00BBDB1BBB35AB852EF3B9AEC6B0A8DCC6E9E1ABAA3AD62AC0A6C5DE765DE2C3711B769E3FDE44A74016FFF82AC46FA8F1797D3B2A726B696E3DEA5530439ACEE3A45C2A51BC32DD055650B"),
SHEX("2E0D739386AAAF37980EE421AA8C19B19AF52E70F59DC0A6988471F5"));
test_hash(&nettle_sha3_224, /* 128 octets */
SHEX("2B6DB7CED8665EBE9DEB080295218426BDAA7C6DA9ADD2088932CDFFBAA1C14129BCCDD70F369EFB149285858D2B1D155D14DE2FDB680A8B027284055182A0CAE275234CC9C92863C1B4AB66F304CF0621CD54565F5BFF461D3B461BD40DF28198E3732501B4860EADD503D26D6E69338F4E0456E9E9BAF3D827AE685FB1D817"),
SHEX("AF3E0CC6E64501F10FD39722E852355FD6D80D32190631E2F06C22AD"));
test_hash(&nettle_sha3_224, /* 129 octets */
SHEX("10DB509B2CDCABA6C062AE33BE48116A29EB18E390E1BBADA5CA0A2718AFBCD23431440106594893043CC7F2625281BF7DE2655880966A23705F0C5155C2F5CCA9F2C2142E96D0A2E763B70686CD421B5DB812DACED0C6D65035FDE558E94F26B3E6DDE5BD13980CC80292B723013BD033284584BFF27657871B0CF07A849F4AE2"),
SHEX("F009E05D1AFE2D33D2C5F4008B46F31468A7BF5299D4F0AB0EFE4FD3"));
test_hash(&nettle_sha3_224, /* 130 octets */
SHEX("9334DE60C997BDA6086101A6314F64E4458F5FF9450C509DF006E8C547983C651CA97879175AABA0C539E82D05C1E02C480975CBB30118121061B1EBAC4F8D9A3781E2DB6B18042E01ECF9017A64A0E57447EC7FCBE6A7F82585F7403EE2223D52D37B4BF426428613D6B4257980972A0ACAB508A7620C1CB28EB4E9D30FC41361EC"),
SHEX("76281BD1613843A3ADBCBC78D1923AFB5B8AA2DCBC48934DEEC84AAA"));
test_hash(&nettle_sha3_224, /* 131 octets */
SHEX("E88AB086891693AA535CEB20E64C7AB97C7DD3548F3786339897A5F0C39031549CA870166E477743CCFBE016B4428D89738E426F5FFE81626137F17AECFF61B72DBEE2DC20961880CFE281DFAB5EE38B1921881450E16032DE5E4D55AD8D4FCA609721B0692BAC79BE5A06E177FE8C80C0C83519FB3347DE9F43D5561CB8107B9B5EDC"),
SHEX("DA7C79E04FCA2B69AAA58199CA69105B6B18FE67E29F380501AA7FA8"));
test_hash(&nettle_sha3_224, /* 132 octets */
SHEX("FD19E01A83EB6EC810B94582CB8FBFA2FCB992B53684FB748D2264F020D3B960CB1D6B8C348C2B54A9FCEA72330C2AAA9A24ECDB00C436ABC702361A82BB8828B85369B8C72ECE0082FE06557163899C2A0EFA466C33C04343A839417057399A63A3929BE1EE4805D6CE3E5D0D0967FE9004696A5663F4CAC9179006A2CEB75542D75D68"),
SHEX("70ECB261757371A282903C696715DC03F106A339F076203BAB436E94"));
test_hash(&nettle_sha3_224, /* 133 octets */
SHEX("59AE20B6F7E0B3C7A989AFB28324A40FCA25D8651CF1F46AE383EF6D8441587AA1C04C3E3BF88E8131CE6145CFB8973D961E8432B202FA5AF3E09D625FAAD825BC19DA9B5C6C20D02ABDA2FCC58B5BD3FE507BF201263F30543819510C12BC23E2DDB4F711D087A86EDB1B355313363A2DE996B891025E147036087401CCF3CA7815BF3C49"),
SHEX("740D3CB455133173EC652AA04709EF0F549F19A9D4CC6BEC9E876B5A"));
test_hash(&nettle_sha3_224, /* 134 octets */
SHEX("77EE804B9F3295AB2362798B72B0A1B2D3291DCEB8139896355830F34B3B328561531F8079B79A6E9980705150866402FDC176C05897E359A6CB1A7AB067383EB497182A7E5AEF7038E4C96D133B2782917417E391535B5E1B51F47D8ED7E4D4025FE98DC87B9C1622614BFF3D1029E68E372DE719803857CA52067CDDAAD958951CB2068CC6"),
SHEX("663835A81A2A38D5AD3A37BD9BC96618D27CA32286E9091834A0871A"));
test_hash(&nettle_sha3_224, /* 135 octets */
SHEX("B771D5CEF5D1A41A93D15643D7181D2A2EF0A8E84D91812F20ED21F147BEF732BF3A60EF4067C3734B85BC8CD471780F10DC9E8291B58339A677B960218F71E793F2797AEA349406512829065D37BB55EA796FA4F56FD8896B49B2CD19B43215AD967C712B24E5032D065232E02C127409D2ED4146B9D75D763D52DB98D949D3B0FED6A8052FBB"),
SHEX("2594153AC2DE681F4DEE340FA344EC388773A377D5B89E503254FD2E"));
test_hash(&nettle_sha3_224, /* 136 octets */
SHEX("B32D95B0B9AAD2A8816DE6D06D1F86008505BD8C14124F6E9A163B5A2ADE55F835D0EC3880EF50700D3B25E42CC0AF050CCD1BE5E555B23087E04D7BF9813622780C7313A1954F8740B6EE2D3F71F768DD417F520482BD3A08D4F222B4EE9DBD015447B33507DD50F3AB4247C5DE9A8ABD62A8DECEA01E3B87C8B927F5B08BEB37674C6F8E380C04"),
SHEX("42275C296937745758FF2B7BEE9A897191AE87E42BD10198D9466C19"));
test_hash(&nettle_sha3_224, /* 137 octets */
SHEX("04410E31082A47584B406F051398A6ABE74E4DA59BB6F85E6B49E8A1F7F2CA00DFBA5462C2CD2BFDE8B64FB21D70C083F11318B56A52D03B81CAC5EEC29EB31BD0078B6156786DA3D6D8C33098C5C47BB67AC64DB14165AF65B44544D806DDE5F487D5373C7F9792C299E9686B7E5821E7C8E2458315B996B5677D926DAC57B3F22DA873C601016A0D"),
SHEX("143F9055EB1F736729C77721FB65ED5EE142F6E969132FB22989C11F"));
test_hash(&nettle_sha3_224, /* 138 octets */
SHEX("8B81E9BADDE026F14D95C019977024C9E13DB7A5CD21F9E9FC491D716164BBACDC7060D882615D411438AEA056C340CDF977788F6E17D118DE55026855F93270472D1FD18B9E7E812BAE107E0DFDE7063301B71F6CFE4E225CAB3B232905A56E994F08EE2891BA922D49C3DAFEB75F7C69750CB67D822C96176C46BD8A29F1701373FB09A1A6E3C7158F"),
SHEX("449A0313CCAB4427032B6BE9D66F827FFB4C71B538B2104F9D14D14A"));
test_hash(&nettle_sha3_224, /* 139 octets */
SHEX("FA6EED24DA6666A22208146B19A532C2EC9BA94F09F1DEF1E7FC13C399A48E41ACC2A589D099276296348F396253B57CB0E40291BD282773656B6E0D8BEA1CDA084A3738816A840485FCF3FB307F777FA5FEAC48695C2AF4769720258C77943FB4556C362D9CBA8BF103AEB9034BAA8EA8BFB9C4F8E6742CE0D52C49EA8E974F339612E830E9E7A9C29065"),
SHEX("21E2760644A19ED18ED0CD74C4E4C071D770132AD215EB6F7D42B01D"));
test_hash(&nettle_sha3_224, /* 140 octets */
SHEX("9BB4AF1B4F09C071CE3CAFA92E4EB73CE8A6F5D82A85733440368DEE4EB1CBC7B55AC150773B6FE47DBE036C45582ED67E23F4C74585DAB509DF1B83610564545642B2B1EC463E18048FC23477C6B2AA035594ECD33791AF6AF4CBC2A1166ABA8D628C57E707F0B0E8707CAF91CD44BDB915E0296E0190D56D33D8DDE10B5B60377838973C1D943C22ED335E"),
SHEX("D5534C72BE2E4B1FAAA813118B0D29DBB86F624067EA34515AFA08BE"));
test_hash(&nettle_sha3_224, /* 141 octets */
SHEX("2167F02118CC62043E9091A647CADBED95611A521FE0D64E8518F16C808AB297725598AE296880A773607A798F7C3CFCE80D251EBEC6885015F9ABF7EAABAE46798F82CB5926DE5C23F44A3F9F9534B3C6F405B5364C2F8A8BDC5CA49C749BED8CE4BA48897062AE8424CA6DDE5F55C0E42A95D1E292CA54FB46A84FBC9CD87F2D0C9E7448DE3043AE22FDD229"),
SHEX("C0CD413B1CE000A1BBE3A2CD103C7F8F95925AC6C8A5C922AFB5F96D"));
test_hash(&nettle_sha3_224, /* 142 octets */
SHEX("94B7FA0BC1C44E949B1D7617D31B4720CBE7CA57C6FA4F4094D4761567E389ECC64F6968E4064DF70DF836A47D0C713336B5028B35930D29EB7A7F9A5AF9AD5CF441745BAEC9BB014CEEFF5A41BA5C1CE085FEB980BAB9CF79F2158E03EF7E63E29C38D7816A84D4F71E0F548B7FC316085AE38A060FF9B8DEC36F91AD9EBC0A5B6C338CBB8F6659D342A24368CF"),
SHEX("93C6BF585E994B1669184AC71DC8E772B53443E668DA0786D528090B"));
test_hash(&nettle_sha3_224, /* 143 octets */
SHEX("EA40E83CB18B3A242C1ECC6CCD0B7853A439DAB2C569CFC6DC38A19F5C90ACBF76AEF9EA3742FF3B54EF7D36EB7CE4FF1C9AB3BC119CFF6BE93C03E208783335C0AB8137BE5B10CDC66FF3F89A1BDDC6A1EED74F504CBE7290690BB295A872B9E3FE2CEE9E6C67C41DB8EFD7D863CF10F840FE618E7936DA3DCA5CA6DF933F24F6954BA0801A1294CD8D7E66DFAFEC"),
SHEX("BFE15BB51F680F2F489F0FDEB32F271090A09D1563F29FEAF92104E0"));
test_hash(&nettle_sha3_224, /* 144 octets */
SHEX("157D5B7E4507F66D9A267476D33831E7BB768D4D04CC3438DA12F9010263EA5FCAFBDE2579DB2F6B58F911D593D5F79FB05FE3596E3FA80FF2F761D1B0E57080055C118C53E53CDB63055261D7C9B2B39BD90ACC32520CBBDBDA2C4FD8856DBCEE173132A2679198DAF83007A9B5C51511AE49766C792A29520388444EBEFE28256FB33D4260439CBA73A9479EE00C63"),
SHEX("6D735FB7579135F61B771B2BB0D81514CDE9C977ACCF6FEAF6EDEBF0"));
test_hash(&nettle_sha3_224, /* 145 octets */
SHEX("836B34B515476F613FE447A4E0C3F3B8F20910AC89A3977055C960D2D5D2B72BD8ACC715A9035321B86703A411DDE0466D58A59769672AA60AD587B8481DE4BBA552A1645779789501EC53D540B904821F32B0BD1855B04E4848F9F8CFE9EBD8911BE95781A759D7AD9724A7102DBE576776B7C632BC39B9B5E19057E226552A5994C1DBB3B5C7871A11F5537011044C53"),
SHEX("6D93153145904CEBE0E8A66C272BEDF4F0D0A3C53AB30264135431A7"));
test_hash(&nettle_sha3_224, /* 146 octets */
SHEX("CC7784A4912A7AB5AD3620AAB29BA87077CD3CB83636ADC9F3DC94F51EDF521B2161EF108F21A0A298557981C0E53CE6CED45BDF782C1EF200D29BAB81DD6460586964EDAB7CEBDBBEC75FD7925060F7DA2B853B2B089588FA0F8C16EC6498B14C55DCEE335CB3A91D698E4D393AB8E8EAC0825F8ADEBEEE196DF41205C011674E53426CAA453F8DE1CBB57932B0B741D4C6"),
SHEX("AFE30535675A7021BF618941D94DDFFCCEFCAA1EF06CDE306D5D7A75"));
test_hash(&nettle_sha3_224, /* 147 octets */
SHEX("7639B461FFF270B2455AC1D1AFCE782944AEA5E9087EB4A39EB96BB5C3BAAF0E868C8526D3404F9405E79E77BFAC5FFB89BF1957B523E17D341D7323C302EA7083872DD5E8705694ACDDA36D5A1B895AAA16ECA6104C82688532C8BFE1790B5DC9F4EC5FE95BAED37E1D287BE710431F1E5E8EE105BC42ED37D74B1E55984BF1C09FE6A1FA13EF3B96FAEAED6A2A1950A12153"),
SHEX("916501614891BD99400A8AEAABF69326FA98B833AED82386AB19E507"));
test_hash(&nettle_sha3_224, /* 148 octets */
SHEX("EB6513FC61B30CFBA58D4D7E80F94D14589090CF1D80B1DF2E68088DC6104959BA0D583D585E9578AB0AEC0CF36C48435EB52ED9AB4BBCE7A5ABE679C97AE2DBE35E8CC1D45B06DDA3CF418665C57CBEE4BBB47FA4CAF78F4EE656FEC237FE4EEBBAFA206E1EF2BD0EE4AE71BD0E9B2F54F91DAADF1FEBFD7032381D636B733DCB3BF76FB14E23AFF1F68ED3DBCF75C9B99C6F26"),
SHEX("9C3759905E47E49CC7057C9237545D444F758535F991F7E8728F3A51"));
test_hash(&nettle_sha3_224, /* 149 octets */
SHEX("1594D74BF5DDE444265D4C04DAD9721FF3E34CBF622DAF341FE16B96431F6C4DF1F760D34F296EB97D98D560AD5286FEC4DCE1724F20B54FD7DF51D4BF137ADD656C80546FB1BF516D62EE82BAA992910EF4CC18B70F3F8698276FCFB44E0EC546C2C39CFD8EE91034FF9303058B4252462F86C823EB15BF481E6B79CC3A02218595B3658E8B37382BD5048EAED5FD02C37944E73B"),
SHEX("733ACDF9CED47F2E43936ED6C2AC0F824F4F5B5D2942522D4DE5F6FC"));
test_hash(&nettle_sha3_224, /* 150 octets */
SHEX("4CFA1278903026F66FEDD41374558BE1B585D03C5C55DAC94361DF286D4BD39C7CB8037ED3B267B07C346626449D0CC5B0DD2CF221F7E4C3449A4BE99985D2D5E67BFF2923357DDEAB5ABCB4619F3A3A57B2CF928A022EB27676C6CF805689004FCA4D41EA6C2D0A4789C7605F7BB838DD883B3AD3E6027E775BCF262881428099C7FFF95B14C095EA130E0B9938A5E22FC52650F591"),
SHEX("530438B7A86B16434C82713EF7392D25C5CF814C7C6408368C4F2EAF"));
test_hash(&nettle_sha3_224, /* 151 octets */
SHEX("D3E65CB92CFA79662F6AF493D696A07CCF32AAADCCEFF06E73E8D9F6F909209E66715D6E978788C49EFB9087B170ECF3AA86D2D4D1A065AE0EFC8924F365D676B3CB9E2BEC918FD96D0B43DEE83727C9A93BF56CA2B2E59ADBA85696546A815067FC7A78039629D4948D157E7B0D826D1BF8E81237BAB7321312FDAA4D521744F988DB6FDF04549D0FDCA393D639C729AF716E9C8BBA48"),
SHEX("84944EB018F8A124E3C969C037464EE32BACF8E58901D2E22291DF9A"));
test_hash(&nettle_sha3_224, /* 152 octets */
SHEX("842CC583504539622D7F71E7E31863A2B885C56A0BA62DB4C2A3F2FD12E79660DC7205CA29A0DC0A87DB4DC62EE47A41DB36B9DDB3293B9AC4BAAE7DF5C6E7201E17F717AB56E12CAD476BE49608AD2D50309E7D48D2D8DE4FA58AC3CFEAFEEE48C0A9EEC88498E3EFC51F54D300D828DDDCCB9D0B06DD021A29CF5CB5B2506915BEB8A11998B8B886E0F9B7A80E97D91A7D01270F9A7717"),
SHEX("1311DA757C405F2A0EAB110B0C515F05FCD59F5495A9704252DA5AB8"));
test_hash(&nettle_sha3_224, /* 153 octets */
SHEX("6C4B0A0719573E57248661E98FEBE326571F9A1CA813D3638531AE28B4860F23C3A3A8AC1C250034A660E2D71E16D3ACC4BF9CE215C6F15B1C0FC7E77D3D27157E66DA9CEEC9258F8F2BF9E02B4AC93793DD6E29E307EDE3695A0DF63CBDC0FC66FB770813EB149CA2A916911BEE4902C47C7802E69E405FE3C04CEB5522792A5503FA829F707272226621F7C488A7698C0D69AA561BE9F378"),
SHEX("B5FDAEAD7E68333CEDB5D4AD636AE7059EB31305E2C831787FD51265"));
test_hash(&nettle_sha3_224, /* 154 octets */
SHEX("51B7DBB7CE2FFEB427A91CCFE5218FD40F9E0B7E24756D4C47CD55606008BDC27D16400933906FD9F30EFFDD4880022D081155342AF3FB6CD53672AB7FB5B3A3BCBE47BE1FD3A2278CAE8A5FD61C1433F7D350675DD21803746CADCA574130F01200024C6340AB0CC2CF74F2234669F34E9009EF2EB94823D62B31407F4BA46F1A1EEC41641E84D77727B59E746B8A671BEF936F05BE820759FA"),
SHEX("2919FD6C376AEC9F502893A9970B9AC6591855227C0E137BE01705AC"));
test_hash(&nettle_sha3_224, /* 155 octets */
SHEX("83599D93F5561E821BD01A472386BC2FF4EFBD4AED60D5821E84AAE74D8071029810F5E286F8F17651CD27DA07B1EB4382F754CD1C95268783AD09220F5502840370D494BEB17124220F6AFCE91EC8A0F55231F9652433E5CE3489B727716CF4AEBA7DCDA20CD29AA9A859201253F948DD94395ABA9E3852BD1D60DDA7AE5DC045B283DA006E1CBAD83CC13292A315DB5553305C628DD091146597"),
SHEX("8910E7ABC3DAA506974EC13E35C43133EBFA91DEEC99BFAD4954447E"));
test_hash(&nettle_sha3_224, /* 156 octets */
SHEX("2BE9BF526C9D5A75D565DD11EF63B979D068659C7F026C08BEA4AF161D85A462D80E45040E91F4165C074C43AC661380311A8CBED59CC8E4C4518E80CD2C78AB1CABF66BFF83EAB3A80148550307310950D034A6286C93A1ECE8929E6385C5E3BB6EA8A7C0FB6D6332E320E71CC4EB462A2A62E2BFE08F0CCAD93E61BEDB5DD0B786A728AB666F07E0576D189C92BF9FB20DCA49AC2D3956D47385E2"),
SHEX("F8B4A4A6FBB8C8432712B5B815B36685C86656C3F67D05BDBB44B49A"));
test_hash(&nettle_sha3_224, /* 157 octets */
SHEX("CA76D3A12595A817682617006848675547D3E8F50C2210F9AF906C0E7CE50B4460186FE70457A9E879E79FD4D1A688C70A347361C847BA0DD6AA52936EAF8E58A1BE2F5C1C704E20146D366AEB3853BED9DE9BEFE9569AC8AAEA37A9FB7139A1A1A7D5C748605A8DEFB297869EBEDD71D615A5DA23496D11E11ABBB126B206FA0A7797EE7DE117986012D0362DCEF775C2FE145ADA6BDA1CCB326BF644"),
SHEX("926FE0044B12422D3E4BFA52C59252ACC91DBF09C488AE9D31C7EB63"));
test_hash(&nettle_sha3_224, /* 158 octets */
SHEX("F76B85DC67421025D64E93096D1D712B7BAF7FB001716F02D33B2160C2C882C310EF13A576B1C2D30EF8F78EF8D2F465007109AAD93F74CB9E7D7BEF7C9590E8AF3B267C89C15DB238138C45833C98CC4A471A7802723EF4C744A853CF80A0C2568DD4ED58A2C9644806F42104CEE53628E5BDF7B63B0B338E931E31B87C24B146C6D040605567CEEF5960DF9E022CB469D4C787F4CBA3C544A1AC91F95F"),
SHEX("A4E4B4A573F7B8865D77D7E57F7D840A55261A96E5FEDD763D0811F4"));
test_hash(&nettle_sha3_224, /* 159 octets */
SHEX("25B8C9C032EA6BCD733FFC8718FBB2A503A4EA8F71DEA1176189F694304F0FF68E862A8197B839957549EF243A5279FC2646BD4C009B6D1EDEBF24738197ABB4C992F6B1DC9BA891F570879ACCD5A6B18691A93C7D0A8D38F95B639C1DAEB48C4C2F15CCF5B9D508F8333C32DE78781B41850F261B855C4BEBCC125A380C54D501C5D3BD07E6B52102116088E53D76583B0161E2A58D0778F091206AABD5A1"),
SHEX("EBFD796B29F6059931732F98602185B6377C4E6E40BD26C810D6DA96"));
test_hash(&nettle_sha3_224, /* 160 octets */
SHEX("21CFDC2A7CCB7F331B3D2EEFFF37E48AD9FA9C788C3F3C200E0173D99963E1CBCA93623B264E920394AE48BB4C3A5BB96FFBC8F0E53F30E22956ADABC2765F57FB761E147ECBF8567533DB6E50C8A1F894310A94EDF806DD8CA6A0E141C0FA7C9FAE6C6AE65F18C93A8529E6E5B553BF55F25BE2E80A9882BD37F145FECBEB3D447A3C4E46C21524CC55CDD62F521AB92A8BA72B897996C49BB273198B7B1C9E"),
SHEX("3FB7392A6621B852312A374C14A679AFB0E3D2EC6A2D147BD5E873F6"));
test_hash(&nettle_sha3_224, /* 161 octets */
SHEX("4E452BA42127DCC956EF4F8F35DD68CB225FB73B5BC7E1EC5A898BBA2931563E74FAFF3B67314F241EC49F4A7061E3BD0213AE826BAB380F1F14FAAB8B0EFDDD5FD1BB49373853A08F30553D5A55CCBBB8153DE4704F29CA2BDEEF0419468E05DD51557CCC80C0A96190BBCC4D77ECFF21C66BDF486459D427F986410F883A80A5BCC32C20F0478BB9A97A126FC5F95451E40F292A4614930D054C851ACD019CCF"),
SHEX("8B3750655AF5ECA10CC4F291043590E2D19759253047A4C1DBC86577"));
test_hash(&nettle_sha3_224, /* 162 octets */
SHEX("FA85671DF7DADF99A6FFEE97A3AB9991671F5629195049880497487867A6C446B60087FAC9A0F2FCC8E3B24E97E42345B93B5F7D3691829D3F8CCD4BB36411B85FC2328EB0C51CB3151F70860AD3246CE0623A8DC8B3C49F958F8690F8E3860E71EB2B1479A5CEA0B3F8BEFD87ACAF5362435EAECCB52F38617BC6C5C2C6E269EAD1FBD69E941D4AD2012DA2C5B21BCFBF98E4A77AB2AF1F3FDA3233F046D38F1DC8"),
SHEX("D3A5004477BBB21CF7D0FCA84E51A7A57E93FAE7222570C01B00E89A"));
test_hash(&nettle_sha3_224, /* 163 octets */
SHEX("E90847AE6797FBC0B6B36D6E588C0A743D725788CA50B6D792352EA8294F5BA654A15366B8E1B288D84F5178240827975A763BC45C7B0430E8A559DF4488505E009C63DA994F1403F407958203CEBB6E37D89C94A5EACF6039A327F6C4DBBC7A2A307D976AA39E41AF6537243FC218DFA6AB4DD817B6A397DF5CA69107A9198799ED248641B63B42CB4C29BFDD7975AC96EDFC274AC562D0474C60347A078CE4C25E88"),
SHEX("75B77C36E394711DFD35C11AEC8C033DCD7C18712F3B06D1FEDC1077"));
test_hash(&nettle_sha3_224, /* 164 octets */
SHEX("F6D5C2B6C93954FC627602C00C4CA9A7D3ED12B27173F0B2C9B0E4A5939398A665E67E69D0B12FB7E4CEB253E8083D1CEB724AC07F009F094E42F2D6F2129489E846EAFF0700A8D4453EF453A3EDDC18F408C77A83275617FABC4EA3A2833AA73406C0E966276079D38E8E38539A70E194CC5513AAA457C699383FD1900B1E72BDFB835D1FD321B37BA80549B078A49EA08152869A918CA57F5B54ED71E4FD3AC5C06729"),
SHEX("E52DF7FDF957269CA0B0F46553D554FE2E6367019B379A1E4F4C7A9F"));
test_hash(&nettle_sha3_224, /* 165 octets */
SHEX("CF8562B1BED89892D67DDAAF3DEEB28246456E972326DBCDB5CF3FB289ACA01E68DA5D59896E3A6165358B071B304D6AB3D018944BE5049D5E0E2BB819ACF67A6006111089E6767132D72DD85BEDDCBB2D64496DB0CC92955AB4C6234F1EEA24F2D51483F2E209E4589BF9519FAC51B4D061E801125E605F8093BB6997BC163D551596FE4AB7CFAE8FB9A90F6980480CE0C229FD1675409BD788354DAF316240CFE0AF93EB"),
SHEX("41853CD54692DBD478BB1E2D6CEDCDA1D139C838AC956A37C87F098F"));
test_hash(&nettle_sha3_224, /* 166 octets */
SHEX("2ACE31ABB0A2E3267944D2F75E1559985DB7354C6E605F18DC8470423FCA30B7331D9B33C4A4326783D1CAAE1B4F07060EFF978E4746BF0C7E30CD61040BD5EC2746B29863EB7F103EBDA614C4291A805B6A4C8214230564A0557BC7102E0BD3ED23719252F7435D64D210EE2AAFC585BE903FA41E1968C50FD5D5367926DF7A05E3A42CF07E656FF92DE73B036CF8B19898C0CB34557C0C12C2D8B84E91181AF467BC75A9D1"),
SHEX("1F2727D5132C453BD321A9FC7AA46FB8B3341D90988C41DE8439D2F1"));
test_hash(&nettle_sha3_224, /* 167 octets */
SHEX("0D8D09AED19F1013969CE5E7EB92F83A209AE76BE31C754844EA9116CEB39A22EBB6003017BBCF26555FA6624185187DB8F0CB3564B8B1C06BF685D47F3286EDA20B83358F599D2044BBF0583FAB8D78F854FE0A596183230C5EF8E54426750EAF2CC4E29D3BDD037E734D863C2BD9789B4C243096138F7672C232314EFFDFC6513427E2DA76916B5248933BE312EB5DDE4CF70804FB258AC5FB82D58D08177AC6F4756017FFF5"),
SHEX("5E745F8966D91EEE013B061281BC20C79B0323000A15BBDE7E0D25AE"));
test_hash(&nettle_sha3_224, /* 168 octets */
SHEX("C3236B73DEB7662BF3F3DAA58F137B358BA610560EF7455785A9BEFDB035A066E90704F929BD9689CEF0CE3BDA5ACF4480BCEB8D09D10B098AD8500D9B6071DFC3A14AF6C77511D81E3AA8844986C3BEA6F469F9E02194C92868CD5F51646256798FF0424954C1434BDFED9FACB390B07D342E992936E0F88BFD0E884A0DDB679D0547CCDEC6384285A45429D115AC7D235A717242021D1DC35641F5F0A48E8445DBA58E6CB2C8EA"),
SHEX("CD2EEB7D48D0260986BADF16F15AA09B5229B7830C73EE95B8CBF85A"));
test_hash(&nettle_sha3_224, /* 169 octets */
SHEX("B39FEB8283EADC63E8184B51DF5AE3FD41AAC8A963BB0BE1CD08AA5867D8D910C669221E73243360646F6553D1CA05A84E8DC0DE05B6419EC349CA994480193D01C92525F3FB3DCEFB08AFC6D26947BDBBFD85193F53B50609C6140905C53A6686B58E53A319A57B962331EDE98149AF3DE3118A819DA4D76706A0424B4E1D2910B0ED26AF61D150EBCB46595D4266A0BD7F651BA47D0C7F179CA28545007D92E8419D48FDFBD744CE"),
SHEX("3322FA727A0089F500A6A99D67419A76C7AF77EF2893E8D385B42720"));
test_hash(&nettle_sha3_224, /* 170 octets */
SHEX("A983D54F503803E8C7999F4EDBBE82E9084F422143A932DDDDC47A17B0B7564A7F37A99D0786E99476428D29E29D3C197A72BFAB1342C12A0FC4787FD7017D7A6174049EA43B5779169EF7472BDBBD941DCB82FC73AAC45A8A94C9F2BD3477F61FD3B796F02A1B8264A214C6FEA74B7051B226C722099EC7883A462B83B6AFDD4009248B8A237F605FE5A08FE7D8B45321421EBBA67BD70A0B00DDBF94BAAB7F359D5D1EEA105F28DCFB"),
SHEX("234C1BC03FD4C3D38DD4C736B59A9107911210D54E98B3A372F57236"));
test_hash(&nettle_sha3_224, /* 171 octets */
SHEX("E4D1C1897A0A866CE564635B74222F9696BF2C7F640DD78D7E2ACA66E1B61C642BB03EA7536AAE597811E9BF4A7B453EDE31F97B46A5F0EF51A071A2B3918DF16B152519AE3776F9F1EDAB4C2A377C3292E96408359D3613844D5EB393000283D5AD3401A318B12FD1474B8612F2BB50FB6A8B9E023A54D7DDE28C43D6D8854C8D9D1155935C199811DBFC87E9E0072E90EB88681CC7529714F8FB8A2C9D88567ADFB974EE205A9BF7B848"),
SHEX("BF229F4017E1674D4CB87B70D3D777C7114F085D77216437B860D641"));
test_hash(&nettle_sha3_224, /* 172 octets */
SHEX("B10C59723E3DCADD6D75DF87D0A1580E73133A9B7D00CB95EC19F5547027323BE75158B11F80B6E142C6A78531886D9047B08E551E75E6261E79785366D7024BD7CD9CF322D9BE7D57FB661069F2481C7BB759CD71B4B36CA2BC2DF6D3A328FAEBDB995A9794A8D72155ED551A1F87C80BF6059B43FC764900B18A1C2441F7487743CF84E565F61F8DD2ECE6B6CCC9444049197AAAF53E926FBEE3BFCA8BE588EC77F29D211BE89DE18B15F6"),
SHEX("F95DE3F40E5FAF58D3320B5B24ACEC7DE6B4B7E54C2F80F6D314AB5A"));
test_hash(&nettle_sha3_224, /* 173 octets */
SHEX("DB11F609BABA7B0CA634926B1DD539C8CBADA24967D7ADD4D9876F77C2D80C0F4DCEFBD7121548373582705CCA2495BD2A43716FE64ED26D059CFB566B3364BD49EE0717BDD9810DD14D8FAD80DBBDC4CAFB37CC60FB0FE2A80FB4541B8CA9D59DCE457738A9D3D8F641AF8C3FD6DA162DC16FC01AAC527A4A0255B4D231C0BE50F44F0DB0B713AF03D968FE7F0F61ED0824C55C4B5265548FEBD6AAD5C5EEDF63EFE793489C39B8FD29D104CE"),
SHEX("04B3BBBDDFEBA441005A48CEBDBB1C6B6A674C2D9B224DA29844374D"));
test_hash(&nettle_sha3_224, /* 174 octets */
SHEX("BEBD4F1A84FC8B15E4452A54BD02D69E304B7F32616AADD90537937106AE4E28DE9D8AAB02D19BC3E2FDE1D651559E296453E4DBA94370A14DBBB2D1D4E2022302EE90E208321EFCD8528AD89E46DC839EA9DF618EA8394A6BFF308E7726BAE0C19BCD4BE52DA6258E2EF4E96AA21244429F49EF5CB486D7FF35CAC1BACB7E95711944BCCB2AB34700D42D1EB38B5D536B947348A458EDE3DC6BD6EC547B1B0CAE5B257BE36A7124E1060C170FFA"),
SHEX("6C1809CD88A0EDB211986359498E0AC37E25E8EB62946938C37D3C26"));
test_hash(&nettle_sha3_224, /* 175 octets */
SHEX("5ACA56A03A13784BDC3289D9364F79E2A85C12276B49B92DB0ADAA4F206D5028F213F678C3510E111F9DC4C1C1F8B6ACB17A6413AA227607C515C62A733817BA5E762CC6748E7E0D6872C984D723C9BB3B117EB8963185300A80BFA65CDE495D70A46C44858605FCCBED086C2B45CEF963D33294DBE9706B13AF22F1B7C4CD5A001CFEC251FBA18E722C6E1C4B1166918B4F6F48A98B64B3C07FC86A6B17A6D0480AB79D4E6415B520F1C484D675B1"),
SHEX("D2744A1BBB34718FCBB614C21E1FCCD0FF88615CB82AA03803AB9460"));
test_hash(&nettle_sha3_224, /* 176 octets */
SHEX("A5AAD0E4646A32C85CFCAC73F02FC5300F1982FABB2F2179E28303E447854094CDFC854310E5C0F60993CEFF54D84D6B46323D930ADB07C17599B35B505F09E784BCA5985E0172257797FB53649E2E9723EFD16865C31B5C3D5113B58BB0BFC8920FABDDA086D7537E66D709D050BD14D0C960873F156FAD5B3D3840CDFCDC9BE6AF519DB262A27F40896AB25CC39F96984D650611C0D5A3080D5B3A1BF186ABD42956588B3B58CD948970D298776060"),
SHEX("F6115F635D98B572FD1BA85763ECCF8BF273FBF7B96F0DB0120CA8AD"));
test_hash(&nettle_sha3_224, /* 177 octets */
SHEX("06CBBE67E94A978203EAD6C057A1A5B098478B4B4CBEF5A97E93C8E42F5572713575FC2A884531D7622F8F879387A859A80F10EF02708CD8F7413AB385AFC357678B9578C0EBF641EF076A1A30F1F75379E9DCB2A885BDD295905EE80C0168A62A9597D10CF12DD2D8CEE46645C7E5A141F6E0E23AA482ABE5661C16E69EF1E28371E2E236C359BA4E92C25626A7B7FF13F6EA4AE906E1CFE163E91719B1F750A96CBDE5FBC953D9E576CD216AFC90323A"),
SHEX("5EE73A4F13A08A2D9B1E52DF88972FFB9F03B843A387EE52B00EDCEE"));
test_hash(&nettle_sha3_224, /* 178 octets */
SHEX("F1C528CF7739874707D4D8AD5B98F7C77169DE0B57188DF233B2DC8A5B31EDA5DB4291DD9F68E6BAD37B8D7F6C9C0044B3BF74BBC3D7D1798E138709B0D75E7C593D3CCCDC1B20C7174B4E692ADD820ACE262D45CCFAE2077E878796347168060A162ECCA8C38C1A88350BD63BB539134F700FD4ADDD5959E255337DAA06BC86358FABCBEFDFB5BC889783D843C08AADC6C4F6C36F65F156E851C9A0F917E4A367B5AD93D874812A1DE6A7B93CD53AD97232"),
SHEX("44BC64559BDB910B7079E0261FF8B49DBA141B32ECBCB70B3ABDFBF9"));
test_hash(&nettle_sha3_224, /* 179 octets */
SHEX("9D9F3A7ECD51B41F6572FD0D0881E30390DFB780991DAE7DB3B47619134718E6F987810E542619DFAA7B505C76B7350C6432D8BF1CFEBDF1069B90A35F0D04CBDF130B0DFC7875F4A4E62CDB8E525AADD7CE842520A482AC18F09442D78305FE85A74E39E760A4837482ED2F437DD13B2EC1042AFCF9DECDC3E877E50FF4106AD10A525230D11920324A81094DA31DEAB6476AA42F20C84843CFC1C58545EE80352BDD3740DD6A16792AE2D86F11641BB717C2"),
SHEX("DE82ADDE823C312F83B3D4C0BD35AA0395AB747ABBC22A70973E2A6C"));
test_hash(&nettle_sha3_224, /* 180 octets */
SHEX("5179888724819FBAD3AFA927D3577796660E6A81C52D98E9303261D5A4A83232F6F758934D50AA83FF9E20A5926DFEBAAC49529D006EB923C5AE5048ED544EC471ED7191EDF46363383824F915769B3E688094C682B02151E5EE01E510B431C8865AFF8B6B6F2F59CB6D129DA79E97C6D2B8FA6C6DA3F603199D2D1BCAB547682A81CD6CF65F6551121391D78BCC23B5BD0E922EC6D8BF97C952E84DD28AEF909ABA31EDB903B28FBFC33B7703CD996215A11238"),
SHEX("B1BA910C9F5E126607FF2531AFFECBA791261E354E2C1A81FDA7A756"));
test_hash(&nettle_sha3_224, /* 181 octets */
SHEX("576EF3520D30B7A4899B8C0D5E359E45C5189ADD100E43BE429A02FB3DE5FF4F8FD0E79D9663ACCA72CD29C94582B19292A557C5B1315297D168FBB54E9E2ECD13809C2B5FCE998EDC6570545E1499DBE7FB74D47CD7F35823B212B05BF3F5A79CAA34224FDD670D335FCB106F5D92C3946F44D3AFCBAE2E41AC554D8E6759F332B76BE89A0324AA12C5482D1EA3EE89DED4936F3E3C080436F539FA137E74C6D3389BDF5A45074C47BC7B20B0948407A66D855E2F"),
SHEX("3EF8D4A6BB8E172374E806E8D65D5F81B3FDB36299DE1C0CCC26DC65"));
test_hash(&nettle_sha3_224, /* 182 octets */
SHEX("0DF2152FA4F4357C8741529DD77E783925D3D76E95BAFA2B542A2C33F3D1D117D159CF473F82310356FEE4C90A9E505E70F8F24859656368BA09381FA245EB6C3D763F3093F0C89B972E66B53D59406D9F01AEA07F8B3B615CAC4EE4D05F542E7D0DAB45D67CCCCD3A606CCBEB31EA1FA7005BA07176E60DAB7D78F6810EF086F42F08E595F0EC217372B98970CC6321576D92CE38F7C397A403BADA1548D205C343AC09DECA86325373C3B76D9F32028FEA8EB32515"),
SHEX("1C89D6460B3F13584BF8319EE538F24C850CA771A51ECC547652BAE3"));
test_hash(&nettle_sha3_224, /* 183 octets */
SHEX("3E15350D87D6EBB5C8AD99D42515CFE17980933C7A8F6B8BBBF0A63728CEFAAD2052623C0BD5931839112A48633FB3C2004E0749C87A41B26A8B48945539D1FF41A4B269462FD199BFECD45374756F55A9116E92093AC99451AEFB2AF9FD32D6D7F5FBC7F7A540D5097C096EBC3B3A721541DE073A1CC02F7FB0FB1B9327FB0B1218CA49C9487AB5396622A13AE546C97ABDEF6B56380DDA7012A8384091B6656D0AB272D363CEA78163FF765CDD13AB1738B940D16CAE"),
SHEX("99981766CFE3B1888F2A008EFA1088016CB29993567F9BB74B5C4D3C"));
test_hash(&nettle_sha3_224, /* 184 octets */
SHEX("C38D6B0B757CB552BE40940ECE0009EF3B0B59307C1451686F1A22702922800D58BCE7A636C1727EE547C01B214779E898FC0E560F8AE7F61BEF4D75EAA696B921FD6B735D171535E9EDD267C192B99880C87997711002009095D8A7A437E258104A41A505E5EF71E5613DDD2008195F0C574E6BA3FE40099CFA116E5F1A2FA8A6DA04BADCB4E2D5D0DE31FDC4800891C45781A0AAC7C907B56D631FCA5CE8B2CDE620D11D1777ED9FA603541DE794DDC5758FCD5FAD78C0"),
SHEX("0215E91EF992DCC7E82D16A2C9B27921C1310C182F59DF8BED5151E8"));
test_hash(&nettle_sha3_224, /* 185 octets */
SHEX("8D2DE3F0B37A6385C90739805B170057F091CD0C7A0BC951540F26A5A75B3E694631BB64C7635EED316F51318E9D8DE13C70A2ABA04A14836855F35E480528B776D0A1E8A23B547C8B8D6A0D09B241D3BE9377160CCA4E6793D00A515DC2992CB7FC741DACA171431DA99CCE6F7789F129E2AC5CF65B40D703035CD2185BB936C82002DAF8CBC27A7A9E554B06196630446A6F0A14BA155ED26D95BD627B7205C072D02B60DB0FD7E49EA058C2E0BA202DAFF0DE91E845CF79"),
SHEX("E52EA6714A3978810DC19E999C32516D4ACF0CBCD67E917A4FEB56D0"));
test_hash(&nettle_sha3_224, /* 186 octets */
SHEX("C464BBDAD275C50DCD983B65AD1019B9FF85A1E71C807F3204BB2C921DC31FBCD8C5FC45868AE9EF85B6C9B83BBA2A5A822201ED68586EC5EC27FB2857A5D1A2D09D09115F22DCC39FE61F5E1BA0FF6E8B4ACB4C6DA748BE7F3F0839739394FF7FA8E39F7F7E84A33C3866875C01BCB1263C9405D91908E9E0B50E7459FABB63D8C6BBB73D8E3483C099B55BC30FF092FF68B6ADEDFD477D63570C9F5515847F36E24BA0B705557130CEC57EBAD1D0B31A378E91894EE26E3A04"),
SHEX("4C3D6321133EF74810E60D3190FFF3CF20C8521CAEA6FF782D7E3BAB"));
test_hash(&nettle_sha3_224, /* 187 octets */
SHEX("8B8D68BB8A75732FE272815A68A1C9C5AA31B41DEDC8493E76525D1D013D33CEBD9E21A5BB95DB2616976A8C07FCF411F5F6BC6F7E0B57ACA78CC2790A6F9B898858AC9C79B165FF24E66677531E39F572BE5D81EB3264524181115F32780257BFB9AEEC6AF12AF28E587CAC068A1A2953B59AD680F4C245B2E3EC36F59940D37E1D3DB38E13EDB29B5C0F404F6FF87F80FC8BE7A225FF22FBB9C8B6B1D7330C57840D24BC75B06B80D30DAD6806544D510AF6C4785E823AC3E0B8"),
SHEX("B9F006DBF853C023DEBE2F40035A7E83C49CDE656EC86A4621950F3E"));
test_hash(&nettle_sha3_224, /* 188 octets */
SHEX("6B018710446F368E7421F1BC0CCF562D9C1843846BC8D98D1C9BF7D9D6FCB48BFC3BF83B36D44C4FA93430AF75CD190BDE36A7F92F867F58A803900DF8018150384D85D82132F123006AC2AEBA58E02A037FE6AFBD65ECA7C44977DD3DC74F48B6E7A1BFD5CC4DCF24E4D52E92BD4455848E4928B0EAC8B7476FE3CC03E862AA4DFF4470DBFED6DE48E410F25096487ECFC32A27277F3F5023B2725ADE461B1355889554A8836C9CF53BD767F5737D55184EEA1AB3F53EDD0976C485"),
SHEX("0A5AA6BC564B8CB2F5FD7255455C0E7A5DACE0050C3BBD259FDE2AB9"));
test_hash(&nettle_sha3_224, /* 189 octets */
SHEX("C9534A24714BD4BE37C88A3DA1082EDA7CABD154C309D7BD670DCCD95AA535594463058A29F79031D6ECAA9F675D1211E9359BE82669A79C855EA8D89DD38C2C761DDD0EC0CE9E97597432E9A1BEAE062CDD71EDFDFD464119BE9E69D18A7A7FD7CE0E2106F0C8B0ABF4715E2CA48EF9F454DC203C96656653B727083513F8EFB86E49C513BB758B3B052FE21F1C05BB33C37129D6CC81F1AEF6ADC45B0E8827A830FE545CF57D0955802C117D23CCB55EA28F95C0D8C2F9C5A242B33F"),
SHEX("8CA4E085F04956B5B16520E3A767F8BA937364FE5F4460288AD4F231"));
test_hash(&nettle_sha3_224, /* 190 octets */
SHEX("07906C87297B867ABF4576E9F3CC7F82F22B154AFCBF293B9319F1B0584DA6A40C27B32E0B1B7F412C4F1B82480E70A9235B12EC27090A5A33175A2BB28D8ADC475CEFE33F7803F8CE27967217381F02E67A3B4F84A71F1C5228E0C2AD971373F6F672624FCEA8D1A9F85170FAD30FA0BBD25035C3B41A6175D467998BD1215F6F3866F53847F9CF68EF3E2FBB54BC994DE2302B829C5EEA68EC441FCBAFD7D16AE4FE9FFF98BF00E5BC2AD54DD91FF9FDA4DD77B6C754A91955D1FBAAD0"),
SHEX("C0AA34391CB3104C41995F3DE782F012D421585E5384E047A997062F"));
test_hash(&nettle_sha3_224, /* 191 octets */
SHEX("588E94B9054ABC2189DF69B8BA34341B77CDD528E7860E5DEFCAA79B0C9A452AD4B82AA306BE84536EB7CEDCBE058D7B84A6AEF826B028B8A0271B69AC3605A9635EA9F5EA0AA700F3EB7835BC54611B922964300C953EFE7491E3677C2CEBE0822E956CD16433B02C68C4A23252C3F9E151A416B4963257B783E038F6B4D5C9F110F871652C7A649A7BCEDCBCCC6F2D0725BB903CC196BA76C76AA9F10A190B1D1168993BAA9FFC96A1655216773458BEC72B0E39C9F2C121378FEAB4E76A"),
SHEX("33C10010A0B810386AE62F3F927DEAFC0D5AF0AF3DC7A8355CB779CD"));
test_hash(&nettle_sha3_224, /* 192 octets */
SHEX("08959A7E4BAAE874928813364071194E2939772F20DB7C3157078987C557C2A6D5ABE68D520EEF3DC491692E1E21BCD880ADEBF63BB4213B50897FA005256ED41B5690F78F52855C8D9168A4B666FCE2DA2B456D7A7E7C17AB5F2FB1EE90B79E698712E963715983FD07641AE4B4E9DC73203FAC1AE11FA1F8C7941FCC82EAB247ADDB56E2638447E9D609E610B60CE086656AAEBF1DA3C8A231D7D94E2FD0AFE46B391FF14A72EAEB3F44AD4DF85866DEF43D4781A0B3578BC996C87970B132"),
SHEX("842A2E13D2728CA55B42D784BB6BC4B889E56775AD56BF75789CC57A"));
test_hash(&nettle_sha3_224, /* 193 octets */
SHEX("CB2A234F45E2ECD5863895A451D389A369AAB99CFEF0D5C9FFCA1E6E63F763B5C14FB9B478313C8E8C0EFEB3AC9500CF5FD93791B789E67EAC12FD038E2547CC8E0FC9DB591F33A1E4907C64A922DDA23EC9827310B306098554A4A78F050262DB5B545B159E1FF1DCA6EB734B872343B842C57EAFCFDA8405EEDBB48EF32E99696D135979235C3A05364E371C2D76F1902F1D83146DF9495C0A6C57D7BF9EE77E80F9787AEE27BE1FE126CDC9EF893A4A7DCBBC367E40FE4E1EE90B42EA25AF01"),
SHEX("A576281CFAA89DCEFB1D37772400BA4CABCEEF33CBA2F833336A74F2"));
test_hash(&nettle_sha3_224, /* 194 octets */
SHEX("D16BEADF02AB1D4DC6F88B8C4554C51E866DF830B89C06E786A5F8757E8909310AF51C840EFE8D20B35331F4355D80F73295974653DDD620CDDE4730FB6C8D0D2DCB2B45D92D4FBDB567C0A3E86BD1A8A795AF26FBF29FC6C65941CDDB090FF7CD230AC5268AB4606FCCBA9EDED0A2B5D014EE0C34F0B2881AC036E24E151BE89EEB6CD9A7A790AFCCFF234D7CB11B99EBF58CD0C589F20BDAC4F9F0E28F75E3E04E5B3DEBCE607A496D848D67FA7B49132C71B878FD5557E082A18ECA1FBDA94D4B"),
SHEX("B1579476972D42FA388FEEB8424834672C4D1A4225EE2DB89DEA7359"));
test_hash(&nettle_sha3_224, /* 195 octets */
SHEX("8F65F6BC59A85705016E2BAE7FE57980DE3127E5AB275F573D334F73F8603106EC3553016608EF2DD6E69B24BE0B7113BF6A760BA6E9CE1C48F9E186012CF96A1D4849D75DF5BB8315387FD78E9E153E76F8BA7EC6C8849810F59FB4BB9B004318210B37F1299526866F44059E017E22E96CBE418699D014C6EA01C9F0038B10299884DBEC3199BB05ADC94E955A1533219C1115FED0E5F21228B071F40DD57C4240D98D37B73E412FE0FA4703120D7C0C67972ED233E5DEB300A22605472FA3A3BA86"),
SHEX("A32EC69648B4FD9BA2431ED0FEF036188C19788D7DDF0D25B6B03ECD"));
test_hash(&nettle_sha3_224, /* 196 octets */
SHEX("84891E52E0D451813210C3FD635B39A03A6B7A7317B221A7ABC270DFA946C42669AACBBBDF801E1584F330E28C729847EA14152BD637B3D0F2B38B4BD5BF9C791C58806281103A3EABBAEDE5E711E539E6A8B2CF297CF351C078B4FA8F7F35CF61BEBF8814BF248A01D41E86C5715EA40C63F7375379A7EB1D78F27622FB468AB784AAABA4E534A6DFD1DF6FA15511341E725ED2E87F98737CCB7B6A6DFAE416477472B046BF1811187D151BFA9F7B2BF9ACDB23A3BE507CDF14CFDF517D2CB5FB9E4AB6"),
SHEX("2B8CF4C8D9E6717EBCE4F0584ADA59A8ACDFAB98AD7E33F355B77095"));
test_hash(&nettle_sha3_224, /* 197 octets */
SHEX("FDD7A9433A3B4AFABD7A3A5E3457E56DEBF78E84B7A0B0CA0E8C6D53BD0C2DAE31B2700C6128334F43981BE3B213B1D7A118D59C7E6B6493A86F866A1635C12859CFB9AD17460A77B4522A5C1883C3D6ACC86E6162667EC414E9A104AA892053A2B1D72165A855BACD8FAF8034A5DD9B716F47A0818C09BB6BAF22AA503C06B4CA261F557761989D2AFBD88B6A678AD128AF68672107D0F1FC73C5CA740459297B3292B281E93BCEB761BDE7221C3A55708E5EC84472CDDCAA84ECF23723CC0991355C6280"),
SHEX("E583849474F3C759B7A3093C7ABADD61425073AEA2678E278215708D"));
test_hash(&nettle_sha3_224, /* 198 octets */
SHEX("70A40BFBEF92277A1AAD72F6B79D0177197C4EBD432668CFEC05D099ACCB651062B5DFF156C0B27336687A94B26679CFDD9DAF7AD204338DD9C4D14114033A5C225BD11F217B5F4732DA167EE3F939262D4043FC9CBA92303B7B5E96AEA12ADDA64859DF4B86E9EE0B58E39091E6B188B408AC94E1294A8911245EE361E60E601EFF58D1D37639F3753BEC80EBB4EFDE25817436076623FC65415FE51D1B0280366D12C554D86743F3C3B6572E400361A60726131441BA493A83FBE9AFDA90F7AF1AE717238D"),
SHEX("10795D3ABCC077F4A1F5B5653C478F9DB42110EA9F34925470B3CD11"));
test_hash(&nettle_sha3_224, /* 199 octets */
SHEX("74356E449F4BF8644F77B14F4D67CB6BD9C1F5AE357621D5B8147E562B65C66585CAF2E491B48529A01A34D226D436959153815380D5689E30B35357CDAC6E08D3F2B0E88E200600D62BD9F5EAF488DF86A4470EA227006182E44809009868C4C280C43D7D64A5268FA719074960087B3A6ABC837882F882C837834535929389A12B2C78187E2EA07EF8B8EEF27DC85002C3AE35F1A50BEE6A1C48BA7E175F3316670B27983472AA6A61EED0A683A39EE323080620EA44A9F74411AE5CE99030528F9AB49C79F2"),
SHEX("31A843B4A9F332F3B6B099843540AA70651B26B80E0BD75B77F3AA9B"));
test_hash(&nettle_sha3_224, /* 200 octets */
SHEX("8C3798E51BC68482D7337D3ABB75DC9FFE860714A9AD73551E120059860DDE24AB87327222B64CF774415A70F724CDF270DE3FE47DDA07B61C9EF2A3551F45A5584860248FABDE676E1CD75F6355AA3EAEABE3B51DC813D9FB2EAA4F0F1D9F834D7CAD9C7C695AE84B329385BC0BEF895B9F1EDF44A03D4B410CC23A79A6B62E4F346A5E8DD851C2857995DDBF5B2D717AEB847310E1F6A46AC3D26A7F9B44985AF656D2B7C9406E8A9E8F47DCB4EF6B83CAACF9AEFB6118BFCFF7E44BEF6937EBDDC89186839B77"),
SHEX("1029CA117957D80F3C859E8394DD34969331CA3BCEDC436B1EAB0849"));
test_hash(&nettle_sha3_224, /* 201 octets */
SHEX("FA56BF730C4F8395875189C10C4FB251605757A8FECC31F9737E3C2503B02608E6731E85D7A38393C67DE516B85304824BFB135E33BF22B3A23B913BF6ACD2B7AB85198B8187B2BCD454D5E3318CACB32FD6261C31AE7F6C54EF6A7A2A4C9F3ECB81CE3555D4F0AD466DD4C108A90399D70041997C3B25345A9653F3C9A6711AB1B91D6A9D2216442DA2C973CBD685EE7643BFD77327A2F7AE9CB283620A08716DFB462E5C1D65432CA9D56A90E811443CD1ECB8F0DE179C9CB48BA4F6FEC360C66F252F6E64EDC96B"),
SHEX("6096E9914C1AC93A6809DE7AD91119C637B00BBD64DCC3E1FAC1E1ED"));
test_hash(&nettle_sha3_224, /* 202 octets */
SHEX("B6134F9C3E91DD8000740D009DD806240811D51AB1546A974BCB18D344642BAA5CD5903AF84D58EC5BA17301D5EC0F10CCD0509CBB3FD3FFF9172D193AF0F782252FD1338C7244D40E0E42362275B22D01C4C3389F19DD69BDF958EBE28E31A4FFE2B5F18A87831CFB7095F58A87C9FA21DB72BA269379B2DC2384B3DA953C7925761FED324620ACEA435E52B424A7723F6A2357374157A34CD8252351C25A1B232826CEFE1BD3E70FFC15A31E7C0598219D7F00436294D11891B82497BC78AA5363892A2495DF8C1EEF"),
SHEX("F583F07DF2327887C6F10A9B1D509A744F3C294A4227976E3C3722E8"));
test_hash(&nettle_sha3_224, /* 203 octets */
SHEX("C941CDB9C28AB0A791F2E5C8E8BB52850626AA89205BEC3A7E22682313D198B1FA33FC7295381354858758AE6C8EC6FAC3245C6E454D16FA2F51C4166FAB51DF272858F2D603770C40987F64442D487AF49CD5C3991CE858EA2A60DAB6A65A34414965933973AC2457089E359160B7CDEDC42F29E10A91921785F6B7224EE0B349393CDCFF6151B50B377D609559923D0984CDA6000829B916AB6896693EF6A2199B3C22F7DC5500A15B8258420E314C222BC000BC4E5413E6DD82C993F8330F5C6D1BE4BC79F08A1A0A46"),
SHEX("A9F43B9621FC5902DF2458FD53D0CDE90AAE7000855C67D853C7937A"));
test_hash(&nettle_sha3_224, /* 204 octets */
SHEX("4499EFFFAC4BCEA52747EFD1E4F20B73E48758BE915C88A1FFE5299B0B005837A46B2F20A9CB3C6E64A9E3C564A27C0F1C6AD1960373036EC5BFE1A8FC6A435C2185ED0F114C50E8B3E4C7ED96B06A036819C9463E864A58D6286F785E32A804443A56AF0B4DF6ABC57ED5C2B185DDEE8489EA080DEEEE66AA33C2E6DAB36251C402682B6824821F998C32163164298E1FAFD31BABBCFFB594C91888C6219079D907FDB438ED89529D6D96212FD55ABE20399DBEFD342248507436931CDEAD496EB6E4A80358ACC78647D043"),
SHEX("E9675FAAC37C93AA61FF9730679A3D1209ADBAD4652582DFF5B1BAAF"));
test_hash(&nettle_sha3_224, /* 205 octets */
SHEX("EECBB8FDFA4DA62170FD06727F697D81F83F601FF61E478105D3CB7502F2C89BF3E8F56EDD469D049807A38882A7EEFBC85FC9A950952E9FA84B8AFEBD3CE782D4DA598002827B1EB98882EA1F0A8F7AA9CE013A6E9BC462FB66C8D4A18DA21401E1B93356EB12F3725B6DB1684F2300A98B9A119E5D27FF704AFFB618E12708E77E6E5F34139A5A41131FD1D6336C272A8FC37080F041C71341BEE6AB550CB4A20A6DDB6A8E0299F2B14BC730C54B8B1C1C487B494BDCCFD3A53535AB2F231590BF2C4062FD2AD58F906A2D0D"),
SHEX("CDB500740812A0D70C68D0097DCC7ACA86EC066C89D36642879A74A5"));
test_hash(&nettle_sha3_224, /* 206 octets */
SHEX("E64F3E4ACE5C8418D65FEC2BC5D2A303DD458034736E3B0DF719098BE7A206DEAF52D6BA82316CAF330EF852375188CDE2B39CC94AA449578A7E2A8E3F5A9D68E816B8D16889FBC0EBF0939D04F63033AE9AE2BDAB73B88C26D6BD25EE460EE1EF58FB0AFA92CC539F8C76D3D097E7A6A63EBB9B5887EDF3CF076028C5BBD5B9DB3211371AD3FE121D4E9BF44229F4E1ECF5A0F9F0EBA4D5CEB72878AB22C3F0EB5A625323AC66F7061F4A81FAC834471E0C59553F108475FE290D43E6A055AE3EE46FB67422F814A68C4BE3E8C9"),
SHEX("66F205D7147991A940AFFB7401B692275338519A97608C584362FFEE"));
test_hash(&nettle_sha3_224, /* 207 octets */
SHEX("D2CB2D733033F9E91395312808383CC4F0CA974E87EC68400D52E96B3FA6984AC58D9AD0938DDE5A973008D818C49607D9DE2284E7618F1B8AED8372FBD52ED54557AF4220FAC09DFA8443011699B97D743F8F2B1AEF3537EBB45DCC9E13DFB438428EE190A4EFDB3CAEB7F3933117BF63ABDC7E57BEB4171C7E1AD260AB0587806C4D137B6316B50ABC9CCE0DFF3ACADA47BBB86BE777E617BBE578FF4519844DB360E0A96C6701290E76BB95D26F0F804C8A4F2717EAC4E7DE9F2CFF3BBC55A17E776C0D02856032A6CD10AD2838"),
SHEX("909FB29277AB2C4CE4279C485D4FBA7E18FF1A88C66DAF7ACF630310"));
test_hash(&nettle_sha3_224, /* 208 octets */
SHEX("F2998955613DD414CC111DF5CE30A995BB792E260B0E37A5B1D942FE90171A4AC2F66D4928D7AD377F4D0554CBF4C523D21F6E5F379D6F4B028CDCB9B1758D3B39663242FF3CB6EDE6A36A6F05DB3BC41E0D861B384B6DEC58BB096D0A422FD542DF175E1BE1571FB52AE66F2D86A2F6824A8CFAACBAC4A7492AD0433EEB15454AF8F312B3B2A577750E3EFBD370E8A8CAC1582581971FBA3BA4BD0D76E718DACF8433D33A59D287F8CC92234E7A271041B526E389EFB0E40B6A18B3AAF658E82ED1C78631FD23B4C3EB27C3FAEC8685"),
SHEX("ED535EC075C983A08F7D7AD5714EBC846F25E8661492B2B31978EDF2"));
test_hash(&nettle_sha3_224, /* 209 octets */
SHEX("447797E2899B72A356BA55BF4DF3ACCA6CDB1041EB477BD1834A9F9ACBC340A294D729F2F97DF3A610BE0FF15EDB9C6D5DB41644B9874360140FC64F52AA03F0286C8A640670067A84E017926A70438DB1BB361DEFEE7317021425F8821DEF26D1EFD77FC853B818545D055ADC9284796E583C76E6FE74C9AC2587AA46AA8F8804F2FEB5836CC4B3ABABAB8429A5783E17D5999F32242EB59EF30CD7ADABC16D72DBDB097623047C98989F88D14EAF02A7212BE16EC2D07981AAA99949DDF89ECD90333A77BC4E1988A82ABF7C7CAF3291"),
SHEX("87F15CC2AEC24168D8BBAF188825F3BB0178CFB5C5899F2FD042CE89"));
test_hash(&nettle_sha3_224, /* 210 octets */
SHEX("9F2C18ADE9B380C784E170FB763E9AA205F64303067EB1BCEA93DF5DAC4BF5A2E00B78195F808DF24FC76E26CB7BE31DC35F0844CDED1567BBA29858CFFC97FB29010331B01D6A3FB3159CC1B973D255DA9843E34A0A4061CABDB9ED37F241BFABB3C20D32743F4026B59A4CCC385A2301F83C0B0A190B0F2D01ACB8F0D41111E10F2F4E149379275599A52DC089B35FDD5234B0CFB7B6D8AEBD563CA1FA653C5C021DFD6F5920E6F18BFAFDBECBF0AB00281333ED50B9A999549C1C8F8C63D7626C48322E9791D5FF72294049BDE91E73F8"),
SHEX("31BB872545217FDBF11077E86B1EE451475C31DC5E0E636EFBE50825"));
test_hash(&nettle_sha3_224, /* 211 octets */
SHEX("AE159F3FA33619002AE6BCCE8CBBDD7D28E5ED9D61534595C4C9F43C402A9BB31F3B301CBFD4A43CE4C24CD5C9849CC6259ECA90E2A79E01FFBAC07BA0E147FA42676A1D668570E0396387B5BCD599E8E66AAED1B8A191C5A47547F61373021FA6DEADCB55363D233C24440F2C73DBB519F7C9FA5A8962EFD5F6252C0407F190DFEFAD707F3C7007D69FF36B8489A5B6B7C557E79DD4F50C06511F599F56C896B35C917B63BA35C6FF8092BAF7D1658E77FC95D8A6A43EEB4C01F33F03877F92774BE89C1114DD531C011E53A34DC248A2F0E6"),
SHEX("26D69F0AE8E4DC61C6354FF570FDD913CAF21C18697F0371F2D323AF"));
test_hash(&nettle_sha3_224, /* 212 octets */
SHEX("3B8E97C5FFC2D6A40FA7DE7FCEFC90F3B12C940E7AB415321E29EE692DFAC799B009C99DCDDB708FCE5A178C5C35EE2B8617143EDC4C40B4D313661F49ABDD93CEA79D117518805496FE6ACF292C4C2A1F76B403A97D7C399DAF85B46AD84E16246C67D6836757BDE336C290D5D401E6C1386AB32797AF6BB251E9B2D8FE754C47482B72E0B394EAB76916126FD68EA7D65EB93D59F5B4C5AC40F7C3B37E7F3694F29424C24AF8C8F0EF59CD9DBF1D28E0E10F799A6F78CAD1D45B9DB3D7DEE4A7059ABE99182714983B9C9D44D7F5643596D4F3"),
SHEX("175393534D90B614B158105C95E18A1052A56D0E775EA1CF51AD5853"));
test_hash(&nettle_sha3_224, /* 213 octets */
SHEX("3434EC31B10FAFDBFEEC0DD6BD94E80F7BA9DCA19EF075F7EB017512AF66D6A4BCF7D16BA0819A1892A6372F9B35BCC7CA8155EE19E8428BC22D214856ED5FA9374C3C09BDE169602CC219679F65A1566FC7316F4CC3B631A18FB4449FA6AFA16A3DB2BC4212EFF539C67CF184680826535589C7111D73BFFCE431B4C40492E763D9279560AAA38EB2DC14A212D723F994A1FE656FF4DD14551CE4E7C621B2AA5604A10001B2878A897A28A08095C325E10A26D2FB1A75BFD64C250309BB55A44F23BBAC0D5516A1C687D3B41EF2FBBF9CC56D4739"),
SHEX("3DECD71DA22639985CF242F2FAE7172459042C826495C8D8D95C3719"));
test_hash(&nettle_sha3_224, /* 214 octets */
SHEX("7C7953D81C8D208FD1C97681D48F49DD003456DE60475B84070EF4847C333B74575B1FC8D2A186964485A3B8634FEAA3595AAA1A2F4595A7D6B6153563DEE31BBAC443C8A33EED6D5D956A980A68366C2527B550EE950250DFB691EACBD5D56AE14B970668BE174C89DF2FEA43AE52F13142639C884FD62A3683C0C3792F0F24AB1318BCB27E21F4737FAB62C77EA38BC8FD1CF41F7DAB64C13FEBE7152BF5BB7AB5A78F5346D43CC741CB6F72B7B8980F268B68BF62ABDFB1577A52438FE14B591498CC95F071228460C7C5D5CEB4A7BDE588E7F21C"),
SHEX("2D0A56864BBEC6449FBF7B2EAE53DA46647183B56FA4EDB1602E5163"));
test_hash(&nettle_sha3_224, /* 215 octets */
SHEX("7A6A4F4FDC59A1D223381AE5AF498D74B7252ECF59E389E49130C7EAEE626E7BD9897EFFD92017F4CCDE66B0440462CDEDFD352D8153E6A4C8D7A0812F701CC737B5178C2556F07111200EB627DBC299CAA792DFA58F35935299FA3A3519E9B03166DFFA159103FFA35E8577F7C0A86C6B46FE13DB8E2CDD9DCFBA85BDDDCCE0A7A8E155F81F712D8E9FE646153D3D22C811BD39F830433B2213DD46301941B59293FD0A33E2B63ADBD95239BC01315C46FDB678875B3C81E053A40F581CFBEC24A1404B1671A1B88A6D06120229518FB13A74CA0AC5AE"),
SHEX("A0FF9E11FBB451943A17E3AC510DE0B582BB072B16DC4E03F9E4019F"));
test_hash(&nettle_sha3_224, /* 216 octets */
SHEX("D9FAA14CEBE9B7DE551B6C0765409A33938562013B5E8E0E1E0A6418DF7399D0A6A771FB81C3CA9BD3BB8E2951B0BC792525A294EBD1083688806FE5E7F1E17FD4E3A41D00C89E8FCF4A363CAEDB1ACB558E3D562F1302B3D83BB886ED27B76033798131DAB05B4217381EAAA7BA15EC820BB5C13B516DD640EAEC5A27D05FDFCA0F35B3A5312146806B4C0275BCD0AAA3B2017F346975DB566F9B4D137F4EE10644C2A2DA66DEECA5342E236495C3C6280528BFD32E90AF4CD9BB908F34012B52B4BC56D48CC8A6B59BAB014988EABD12E1A0A1C2E170E7"),
SHEX("4FEFBE74645949A1291C6F6F05EAF4B780EA01EC5EA5105ECDCB984A"));
test_hash(&nettle_sha3_224, /* 217 octets */
SHEX("2D8427433D0C61F2D96CFE80CF1E932265A191365C3B61AAA3D6DCC039F6BA2AD52A6A8CC30FC10F705E6B7705105977FA496C1C708A277A124304F1FC40911E7441D1B5E77B951AAD7B01FD5DB1B377D165B05BBF898042E39660CAF8B279FE5229D1A8DB86C0999ED65E53D01CCBC4B43173CCF992B3A14586F6BA42F5FE30AFA8AE40C5DF29966F9346DA5F8B35F16A1DE3AB6DE0F477D8D8660918060E88B9B9E9CA6A4207033B87A812DBF5544D39E4882010F82B6CE005F8E8FF6FE3C3806BC2B73C2B83AFB704345629304F9F86358712E9FAE3CA3E"),
SHEX("7CC9EEBBE0DF46A398233FA31286F8A530292B53E48BA54B6AE40472"));
test_hash(&nettle_sha3_224, /* 218 octets */
SHEX("5E19D97887FCAAC0387E22C6F803C34A3DACD2604172433F7A8A7A526CA4A2A1271ECFC5D5D7BE5AC0D85D921095350DFC65997D443C21C8094E0A3FEFD2961BCB94AED03291AE310CCDA75D8ACE4BC7D89E7D3E5D1650BDA5D668B8B50BFC8E608E184F4D3A9A2BADC4FF5F07E0C0BC8A9F2E0B2A26FD6D8C550008FAAAB75FD71AF2A424BEC9A7CD9D83FAD4C8E9319115656A8717D3B523A68FF8004258B9990ED362308461804BA3E3A7E92D8F2FFAE5C2FBA55BA5A3C27C0A2F71BD711D2FE1799C2ADB31B200035481E9EE5C4ADF2AB9C0FA50B23975CF"),
SHEX("03D718DA677C4018E52288BB30E4E6E732A16144931176F0A8C73970"));
test_hash(&nettle_sha3_224, /* 219 octets */
SHEX("C8E976AB4638909387CE3B8D4E510C3230E5690E02C45093B1D297910ABC481E56EEA0F296F98379DFC9080AF69E73B2399D1C143BEE80AE1328162CE1BA7F6A8374679B20AACD380EB4E61382C99998704D62701AFA914F9A2705CDB065885F50D086C3EB5753700C387118BB142F3E6DA1E988DFB31AC75D7368931E45D1391A274B22F83CEB072F9BCABC0B216685BFD789F5023971024B1878A205442522F9EA7D8797A4102A3DF41703768251FD5E017C85D1200A464118AA35654E7CA39F3C375B8EF8CBE7534DBC64BC20BEFB417CF60EC92F63D9EE7397"),
SHEX("A9ABB430FC1B3D8C6CDEB5319878E7B12B118E2E03F40562A376418C"));
test_hash(&nettle_sha3_224, /* 220 octets */
SHEX("7145FA124B7429A1FC2231237A949BA7201BCC1822D3272DE005B682398196C25F7E5CC2F289FBF44415F699CB7FE6757791B1443410234AE061EDF623359E2B4E32C19BF88450432DD01CAA5EB16A1DC378F391CA5E3C4E5F356728BDDD4975DB7C890DA8BBC84CC73FF244394D0D48954978765E4A00B593F70F2CA082673A261ED88DBCEF1127728D8CD89BC2C597E9102CED6010F65FA75A14EBE467FA57CE3BD4948B6867D74A9DF5C0EC6F530CBF2EE61CE6F06BC8F2864DFF5583776B31DF8C7FFCB61428A56BF7BD37188B4A5123BBF338393AF46EDA85E6"),
SHEX("4A7A58B337872189A06B53B6BCC50C29EF9D0BBC491832907AF14EC8"));
test_hash(&nettle_sha3_224, /* 221 octets */
SHEX("7FDFADCC9D29BAD23AE038C6C65CDA1AEF757221B8872ED3D75FF8DF7DA0627D266E224E812C39F7983E4558BFD0A1F2BEF3FEB56BA09120EF762917B9C093867948547AEE98600D10D87B20106878A8D22C64378BF634F7F75900C03986B077B0BF8B740A82447B61B99FEE5376C5EB6680EC9E3088F0BDD0C56883413D60C1357D3C811950E5890E7600103C916341B80C743C6A852B7B4FB60C3BA21F3BC15B8382437A68454779CF3CD7F9F90CCC8EF28D0B706535B1E4108EB5627BB45D719CB046839AEE311CA1ABDC8319E050D67972CB35A6B1601B25DBF487"),
SHEX("808E01CD273919BA1BFF011E0E7094EC6D5C4962912B08F11965AB58"));
test_hash(&nettle_sha3_224, /* 222 octets */
SHEX("988638219FD3095421F826F56E4F09E356296B628C3CE6930C9F2E758FD1A80C8273F2F61E4DAAE65C4F110D3E7CA0965AC7D24E34C0DC4BA2D6FF0BF5BBE93B3585F354D7543CB542A1AA54674D375077F2D360A8F4D42F3DB131C3B7AB7306267BA107659864A90C8C909460A73621D1F5D9D3FD95BEB19B23DB1CB6C0D0FBA91D36891529B8BD8263CAA1BAB56A4AFFAED44962DF096D8D5B1EB845EF31188B3E10F1AF811A13F156BEB7A288AAE593EBD1471B624AA1A7C6ADF01E2200B3D72D88A3AED3100C88231E41EFC376906F0B580DC895F080FDA5741DB1CB"),
SHEX("DCBCC30B6909FDF00650F1A10CFBBD419408F9D37F378C5CA693B803"));
test_hash(&nettle_sha3_224, /* 223 octets */
SHEX("5AAB62756D307A669D146ABA988D9074C5A159B3DE85151A819B117CA1FF6597F6156E80FDD28C9C3176835164D37DA7DA11D94E09ADD770B68A6E081CD22CA0C004BFE7CD283BF43A588DA91F509B27A6584C474A4A2F3EE0F1F56447379240A5AB1FB77FDCA49B305F07BA86B62756FB9EFB4FC225C86845F026EA542076B91A0BC2CDD136E122C659BE259D98E5841DF4C2F60330D4D8CDEE7BF1A0A244524EECC68FF2AEF5BF0069C9E87A11C6E519DE1A4062A10C83837388F7EF58598A3846F49D499682B683C4A062B421594FAFBC1383C943BA83BDEF515EFCF10D"),
SHEX("BE077F12762EF51859B6C520B19231E30442AC268CE4FD47366FF9F1"));
test_hash(&nettle_sha3_224, /* 224 octets */
SHEX("47B8216AA0FBB5D67966F2E82C17C07AA2D6327E96FCD83E3DE7333689F3EE79994A1BF45082C4D725ED8D41205CB5BCDF5C341F77FACB1DA46A5B9B2CBC49EADF786BCD881F371A95FA17DF73F606519AEA0FF79D5A11427B98EE7F13A5C00637E2854134691059839121FEA9ABE2CD1BCBBBF27C74CAF3678E05BFB1C949897EA01F56FFA4DAFBE8644611685C617A3206C7A7036E4AC816799F693DAFE7F19F303CE4EBA09D21E03610201BFC665B72400A547A1E00FA9B7AD8D84F84B34AEF118515E74DEF11B9188BD1E1F97D9A12C30132EC2806339BDADACDA2FD8B78"),
SHEX("25C425265AB07D0A8EC659D4D5EE618BDE87003B7255FF4B5315F1C7"));
test_hash(&nettle_sha3_224, /* 225 octets */
SHEX("8CFF1F67FE53C098896D9136389BD8881816CCAB34862BB67A656E3D98896F3CE6FFD4DA73975809FCDF9666760D6E561C55238B205D8049C1CEDEEF374D1735DAA533147BFA960B2CCE4A4F254176BB4D1BD1E89654432B8DBE1A135C42115B394B024856A2A83DC85D6782BE4B444239567CCEC4B184D4548EAE3FF6A192F343292BA2E32A0F267F31CC26719EB85245D415FB897AC2DA433EE91A99424C9D7F1766A44171D1651001C38FC79294ACCC68CEB5665D36218454D3BA169AE058A831338C17743603F81EE173BFC0927464F9BD728DEE94C6AEAB7AAE6EE3A627E8"),
SHEX("046CF62C41CE9B0F54B667558063023F59887BADA9CC288414ADEE7F"));
test_hash(&nettle_sha3_224, /* 226 octets */
SHEX("EACD07971CFF9B9939903F8C1D8CBB5D4DB1B548A85D04E037514A583604E787F32992BF2111B97AC5E8A938233552731321522AB5E8583561260B7D13EBEEF785B23A41FD8576A6DA764A8ED6D822D4957A545D5244756C18AA80E1AAD4D1F9C20D259DEE1711E2CC8FD013169FB7CC4CE38B362F8E0936AE9198B7E838DCEA4F7A5B9429BB3F6BBCF2DC92565E3676C1C5E6EB3DD2A0F86AA23EDD3D0891F197447692794B3DFA269611AD97F72B795602B4FDB198F3FD3EB41B415064256E345E8D8C51C555DC8A21904A9B0F1AD0EFFAB7786AAC2DA3B196507E9F33CA356427"),
SHEX("37E3844080986179FDA99E9B8C54E294643060795B66E810E3E25D9E"));
test_hash(&nettle_sha3_224, /* 227 octets */
SHEX("23AC4E9A42C6EF45C3336CE6DFC2FF7DE8884CD23DC912FEF0F7756C09D335C189F3AD3A23697ABDA851A81881A0C8CCAFC980AB2C702564C2BE15FE4C4B9F10DFB2248D0D0CB2E2887FD4598A1D4ACDA897944A2FFC580FF92719C95CF2AA42DC584674CB5A9BC5765B9D6DDF5789791D15F8DD925AA12BFFAFBCE60827B490BB7DF3DDA6F2A143C8BF96ABC903D83D59A791E2D62814A89B8080A28060568CF24A80AE61179FE84E0FFAD00388178CB6A617D37EFD54CC01970A4A41D1A8D3DDCE46EDBBA4AB7C90AD565398D376F431189CE8C1C33E132FEAE6A8CD17A61C630012"),
SHEX("3B503D615E54132B42CAC1A0450A0D7E2EDC63ED87BF109C509C7987"));
test_hash(&nettle_sha3_224, /* 228 octets */
SHEX("0172DF732282C9D488669C358E3492260CBE91C95CFBC1E3FEA6C4B0EC129B45F242ACE09F152FC6234E1BEE8AAB8CD56E8B486E1DCBA9C05407C2F95DA8D8F1C0AF78EE2ED82A3A79EC0CB0709396EE62AADB84F8A4EE8A7CCCA3C1EE84E302A09EA802204AFECF04097E67D0F8E8A9D2651126C0A598A37081E42D168B0AE8A71951C524259E4E2054E535B779679BDADE566FE55700858618E626B4A0FAF895BCCE9011504A49E05FD56127EAE3D1F8917AFB548ECADABDA1020111FEC9314C413498A360B08640549A22CB23C731ACE743252A8227A0D2689D4C6001606678DFB921"),
SHEX("CB40837DAF4A882538464DEC0A999DA482B4AAE08708EA6D5D7FF461"));
test_hash(&nettle_sha3_224, /* 229 octets */
SHEX("3875B9240CF3E0A8B59C658540F26A701CF188496E2C2174788B126FD29402D6A75453BA0635284D08835F40051A2A9683DC92AFB9383719191231170379BA6F4ADC816FECBB0F9C446B785BF520796841E58878B73C58D3EBB097CE4761FDEABE15DE2F319DFBAF1742CDEB389559C788131A6793E193856661376C81CE9568DA19AA6925B47FFD77A43C7A0E758C37D69254909FF0FBD415EF8EB937BCD49F91468B49974C07DC819ABD67395DB0E05874FF83DDDAB895344ABD0E7111B2DF9E58D76D85AD98106B36295826BE04D435615595605E4B4BB824B33C4AFEB5E7BB0D19F909"),
SHEX("EAE911E66661DCD3472B458A48B74730468923C7ABCAC7F311F02463"));
test_hash(&nettle_sha3_224, /* 230 octets */
SHEX("747CC1A59FEFBA94A9C75BA866C30DC5C1CB0C0F8E9361D98484956DD5D1A40F6184AFBE3DAC9F76028D1CAECCFBF69199C6CE2B4C092A3F4D2A56FE5A33A00757F4D7DEE5DFB0524311A97AE0668A47971B95766E2F6DD48C3F57841F91F04A00AD5EA70F2D479A2620DC5CD78EAAB3A3B011719B7E78D19DDF70D9423798AF77517EBC55392FCD01FC600D8D466B9E7A7A85BF33F9CC5419E9BD874DDFD60981150DDAF8D7FEBAA4374F0872A5628D318000311E2F5655365AD4D407C20E5C04DF17A222E7DEEC79C5AB1116D8572F91CD06E1CCC7CED53736FC867FD49ECEBE6BF8082E8A"),
SHEX("307D5A8BA5865A4D281ACB2F3C5EF16E3B11BCD8C0F82D22D47C2CC8"));
test_hash(&nettle_sha3_224, /* 231 octets */
SHEX("57AF971FCCAEC97435DC2EC9EF0429BCEDC6B647729EA168858A6E49AC1071E706F4A5A645CA14E8C7746D65511620682C906C8B86EC901F3DDED4167B3F00B06CBFAC6AEE3728051B3E5FF10B4F9ED8BD0B8DA94303C833755B3CA3AEDDF0B54BC8D6632138B5D25BAB03D17B3458A9D782108006F5BB7DE75B5C0BA854B423D8BB801E701E99DC4FEAAD59BC1C7112453B04D33EA3635639FB802C73C2B71D58A56BBD671B18FE34ED2E3DCA38827D63FDB1D4FB3285405004B2B3E26081A8FF08CD6D2B08F8E7B7E90A2AB1ED7A41B1D0128522C2F8BFF56A7FE67969422CE839A9D4608F03"),
SHEX("58666B325D81CBE6A4BBAD91720E2BA93C70EA114E7F77323C5BE486"));
test_hash(&nettle_sha3_224, /* 232 octets */
SHEX("04E16DEDC1227902BAAF332D3D08923601BDD64F573FAA1BB7201918CFE16B1E10151DAE875DA0C0D63C59C3DD050C4C6A874011B018421AFC4623AB0381831B2DA2A8BA42C96E4F70864AC44E106F94311051E74C77C1291BF5DB9539E69567BF6A11CF6932BBBAD33F8946BF5814C066D851633D1A513510039B349939BFD42B858C21827C8FF05F1D09B1B0765DC78A135B5CA4DFBA0801BCADDFA175623C8B647EACFB4444B85A44F73890607D06D507A4F8393658788669F6EF4DEB58D08C50CA0756D5E2F49D1A7AD73E0F0B3D3B5F090ACF622B1878C59133E4A848E05153592EA81C6FBF"),
SHEX("BC296FFD39381CF1C96228A9F380F41C871B8788C654ED9B384C17FE"));
test_hash(&nettle_sha3_224, /* 233 octets */
SHEX("7C815C384EEE0F288ECE27CCED52A01603127B079C007378BC5D1E6C5E9E6D1C735723ACBBD5801AC49854B2B569D4472D33F40BBB8882956245C366DC3582D71696A97A4E19557E41E54DEE482A14229005F93AFD2C4A7D8614D10A97A9DFA07F7CD946FA45263063DDD29DB8F9E34DB60DAA32684F0072EA2A9426ECEBFA5239FB67F29C18CBAA2AF6ED4BF4283936823AC1790164FEC5457A9CBA7C767CA59392D94CAB7448F50EB34E9A93A80027471CE59736F099C886DEA1AB4CBA4D89F5FC7AE2F21CCD27F611ECA4626B2D08DC22382E92C1EFB2F6AFDC8FDC3D2172604F5035C46B8197D3"),
SHEX("0CCEAE713E5E39BCEFE7A2273004816FE005D5EDFB2A965CC9AC9948"));
test_hash(&nettle_sha3_224, /* 234 octets */
SHEX("E29D505158DBDD937D9E3D2145658EE6F5992A2FC790F4F608D9CDB44A091D5B94B88E81FAC4FDF5C49442F13B911C55886469629551189EAFF62488F1A479B7DB11A1560E198DDCCCCF50159093425FF7F1CB8D1D1246D0978764087D6BAC257026B090EFAE8CEC5F22B6F21C59ACE1AC7386F5B8837CA6A12B6FBF5534DD0560EF05CA78104D3B943DDB220FEAEC89AA5E692A00F822A2AB9A2FE60350D75E7BE16FF2526DC643872502D01F42F188ABED0A6E9A6F5FD0D1CE7D5755C9FFA66B0AF0B20BD806F08E06156690D81AC811778CA3DAC2C249B96002017FCE93E507E3B953ACF99964B847"),
SHEX("7997FDF30837D8B25E85FC01316E31B61EE814490DA002A04816D7CA"));
test_hash(&nettle_sha3_224, /* 235 octets */
SHEX("D85588696F576E65ECA0155F395F0CFACD83F36A99111ED5768DF2D116D2121E32357BA4F54EDE927F189F297D3A97FAD4E9A0F5B41D8D89DD7FE20156799C2B7B6BF9C957BA0D6763F5C3BC5129747BBB53652B49290CFF1C87E2CDF2C4B95D8AAEE09BC8FBFA6883E62D237885810491BFC101F1D8C636E3D0EDE838AD05C207A3DF4FAD76452979EB99F29AFAECEDD1C63B8D36CF378454A1BB67A741C77AC6B6B3F95F4F02B64DABC15438613EA49750DF42EE90101F115AA9ABB9FF64324DDE9DABBB01054E1BD6B4BCDC7930A44C2300D87CA78C06924D0323AD7887E46C90E8C4D100ACD9EED21E"),
SHEX("9897B479871AC73DABBE6221E27BFA67278F2BB044E3D0726FCB2B81"));
test_hash(&nettle_sha3_224, /* 236 octets */
SHEX("3A12F8508B40C32C74492B66323375DCFE49184C78F73179F3314B79E63376B8AC683F5A51F1534BD729B02B04D002F55CBD8E8FC9B5EC1EA6BBE6A0D0E7431518E6BA45D124035F9D3DCE0A8BB7BF1430A9F657E0B4EA9F20EB20C786A58181A1E20A96F1628F8728A13BDF7A4B4B32FC8AA7054CC4881AE7FA19AFA65C6C3EE1B3ADE3192AF42054A8A911B8EC1826865D46D93F1E7C5E2B7813C92A506E53886F3D4701BB93D2A681AD109C845904BB861AF8AF0646B6E399B38B614051D34F6842563A0F37EC00CB3D865FC5D746C4987DE2A65071100883A2A9C7A2BFE1E2DD603D9EA24DC7C5FD06BE"),
SHEX("EAD2620FBC4BDFB14AEC8C7B9AA882BA3EB2AACC9A15D7D36DBA086D"));
test_hash(&nettle_sha3_224, /* 237 octets */
SHEX("1861EDCE46FA5AD17E1FF1DEAE084DEC580F97D0A67885DFE834B9DFAC1AE076742CE9E267512CA51F6DF5A455AF0C5FD6ABF94ACEA103A3370C354485A7846FB84F3AC7C2904B5B2FBF227002CE512133BB7E1C4E50057BFD1E44DB33C7CDB969A99E284B184F50A14B068A1FC5009D9B298DBE92239572A7627AAC02ABE8F3E3B473417F36D4D2505D16B7577F4526C9D94A270A2DFE450D06DA8F6FA956879A0A55CFE99E742EA555EA477BA3E9B44CCD508C375423611AF92E55345DC215779B2D5119EBA49C71D49B9FE3F1569FA24E5CA3E332D042422A8B8158D3EC66A80012976F31FFDF305F0C9C5E"),
SHEX("545E59812C7AEA1BD1CD48880D6650117DFD9E58A791DAC1072B19DA"));
test_hash(&nettle_sha3_224, /* 238 octets */
SHEX("08D0FFDE3A6E4EF65608EA672E4830C12943D7187CCFF08F4941CFC13E545F3B9C7AD5EEBBE2B01642B486CAF855C2C73F58C1E4E3391DA8E2D63D96E15FD84953AE5C231911B00AD6050CD7AAFDAAC9B0F663AE6AAB45519D0F5391A541707D479034E73A6AD805AE3598096AF078F1393301493D663DD71F83869CA27BA508B7E91E81E128C1716DC3ACFE3084B2201E04CF8006617EECF1B640474A5D45CFDE9F4D3EF92D6D055B909892194D8A8218DB6D8203A84261D200D71473D7488F3427416B6896C137D455F231071CACBC86E0415AB88AEC841D96B7B8AF41E05BB461A40645BF176601F1E760DE5F"),
SHEX("7C2FA00961BCF020B95A0ED7193EA3583340BBD37898EF6A464C1940"));
test_hash(&nettle_sha3_224, /* 239 octets */
SHEX("D782ABB72A5BE3392757BE02D3E45BE6E2099D6F000D042C8A543F50ED6EBC055A7F133B0DD8E9BC348536EDCAAE2E12EC18E8837DF7A1B3C87EC46D50C241DEE820FD586197552DC20BEEA50F445A07A38F1768A39E2B2FF05DDDEDF751F1DEF612D2E4D810DAA3A0CC904516F9A43AF660315385178A529E51F8AAE141808C8BC5D7B60CAC26BB984AC1890D0436EF780426C547E94A7B08F01ACBFC4A3825EAE04F520A9016F2FB8BF5165ED12736FC71E36A49A73614739EAA3EC834069B1B40F1350C2B3AB885C02C640B9F7686ED5F99527E41CFCD796FE4C256C9173186C226169FF257954EBDA81C0E5F99"),
SHEX("232DB22EB2C19109AFEFB71918EA2DAA7C0D76652E1884EA7A8AE646"));
test_hash(&nettle_sha3_224, /* 240 octets */
SHEX("5FCE8109A358570E40983E1184E541833BB9091E280F258CFB144387B05D190E431CB19BAA67273BA0C58ABE91308E1844DCD0B3678BAA42F335F2FA05267A0240B3C718A5942B3B3E3BFA98A55C25A1466E8D7A603722CB2BBF03AFA54CD769A99F310735EE5A05DAE2C22D397BD95635F58C48A67F90E1B73AAFCD3F82117F0166657838691005B18DA6F341D6E90FC1CDB352B30FAE45D348294E501B63252DE14740F2B85AE5299DDEC3172DE8B6D0BA219A20A23BB5E10FF434D39DB3F583305E9F5C039D98569E377B75A70AB837D1DF269B8A4B566F40BB91B577455FD3C356C914FA06B9A7CE24C7317A172D"),
SHEX("DB85AF5CFCE746240E6D44E73CEF66A72CE5968284D35FFEF7FBFF6C"));
test_hash(&nettle_sha3_224, /* 241 octets */
SHEX("6172F1971A6E1E4E6170AFBAD95D5FEC99BF69B24B674BC17DD78011615E502DE6F56B86B1A71D3F4348087218AC7B7D09302993BE272E4A591968AEF18A1262D665610D1070EE91CC8DA36E1F841A69A7A682C580E836941D21D909A3AFC1F0B963E1CA5AB193E124A1A53DF1C587470E5881FB54DAE1B0D840F0C8F9D1B04C645BA1041C7D8DBF22030A623AA15638B3D99A2C400FF76F3252079AF88D2B37F35EE66C1AD7801A28D3D388AC450B97D5F0F79E4541755356B3B1A5696B023F39AB7AB5F28DF4202936BC97393B93BC915CB159EA1BD7A0A414CB4B7A1AC3AF68F50D79F0C9C7314E750F7D02FAA58BFA"),
SHEX("A1EB42FB0792361F0D6809A2E8DC062F09F2855B39BC2C4B7F54311E"));
test_hash(&nettle_sha3_224, /* 242 octets */
SHEX("5668ECD99DFBE215C4118398AC9C9EAF1A1433FAB4CCDD3968064752B625EA944731F75D48A27D047D67547F14DD0FFAA55FA5E29F7AF0D161D85EAFC4F2029B717C918EAB9D304543290BDBA7158B68020C0BA4E079BC95B5BC0FC044A992B94B4CCD3BD66D0EABB5DBBAB904D62E00752C4E3B0091D773BCF4C14B4377DA3EFFF824B1CB2FA01B32D1E46C909E626ED2DAE920F4C7DBEB635BC754FACBD8D49BEBA3F23C1C41CCBFCD0EE0C114E69737F5597C0BF1D859F0C767E18002AE8E39C26261FFDE2920D3D0BAF0E906138696CFE5B7E32B600F45DF3AAA39932F3A7DF95B60FA8712A2271FCAF3911CE7B511B1"),
SHEX("1AF4A3AB9A07CF064C254D122CC7DE15E0F0D3CA3DFA50EA1C43A78D"));
test_hash(&nettle_sha3_224, /* 243 octets */
SHEX("03D625488354DF30E3F875A68EDFCF340E8366A8E1AB67F9D5C5486A96829DFAC0578289082B2A62117E1CF418B43B90E0ADC881FC6AE8105C888E9ECD21AEA1C9AE1A4038DFD17378FED71D02AE492087D7CDCD98F746855227967CB1AB4714261EE3BEAD3F4DB118329D3EBEF4BC48A875C19BA763966DA0EBEA800E01B2F50B00E9DD4CACA6DCB314D00184EF71EA2391D760C950710DB4A70F9212FFC54861F9DC752CE18867B8AD0C48DF8466EF7231E7AC567F0EB55099E622EBB86CB237520190A61C66AD34F1F4E289CB3282AE3EAAC6152ED24D2C92BAE5A7658252A53C49B7B02DFE54FDB2E90074B6CF310AC661"),
SHEX("C14D43525E18892C79142D887D2AD3992848B72CCC087F64F0F1D621"));
test_hash(&nettle_sha3_224, /* 244 octets */
SHEX("2EDC282FFB90B97118DD03AAA03B145F363905E3CBD2D50ECD692B37BF000185C651D3E9726C690D3773EC1E48510E42B17742B0B0377E7DE6B8F55E00A8A4DB4740CEE6DB0830529DD19617501DC1E9359AA3BCF147E0A76B3AB70C4984C13E339E6806BB35E683AF8527093670859F3D8A0FC7D493BCBA6BB12B5F65E71E705CA5D6C948D66ED3D730B26DB395B3447737C26FAD089AA0AD0E306CB28BF0ACF106F89AF3745F0EC72D534968CCA543CD2CA50C94B1456743254E358C1317C07A07BF2B0ECA438A709367FAFC89A57239028FC5FECFD53B8EF958EF10EE0608B7F5CB9923AD97058EC067700CC746C127A61EE3"),
SHEX("116C0462D50D57F948015EC74BE9015707313712B45883C02FE84E1E"));
test_hash(&nettle_sha3_224, /* 245 octets */
SHEX("90B28A6AA1FE533915BCB8E81ED6CACDC10962B7FF82474F845EEB86977600CF70B07BA8E3796141EE340E3FCE842A38A50AFBE90301A3BDCC591F2E7D9DE53E495525560B908C892439990A2CA2679C5539FFDF636777AD9C1CDEF809CDA9E8DCDB451ABB9E9C17EFA4379ABD24B182BD981CAFC792640A183B61694301D04C5B3EAAD694A6BD4CC06EF5DA8FA23B4FA2A64559C5A68397930079D250C51BCF00E2B16A6C49171433B0AADFD80231276560B80458DD77089B7A1BBCC9E7E4B9F881EACD6C92C4318348A13F4914EB27115A1CFC5D16D7FD94954C3532EFACA2CAB025103B2D02C6FD71DA3A77F417D7932685888A"),
SHEX("96F7111176641F6373701BA594090079146D4220F30B5120C12498BA"));
test_hash(&nettle_sha3_224, /* 246 octets */
SHEX("2969447D175490F2AA9BB055014DBEF2E6854C95F8D60950BFE8C0BE8DE254C26B2D31B9E4DE9C68C9ADF49E4EE9B1C2850967F29F5D08738483B417BB96B2A56F0C8ACA632B552059C59AAC3F61F7B45C966B75F1D9931FF4E596406378CEE91AAA726A3A84C33F37E9CDBE626B5745A0B06064A8A8D56E53AAF102D23DD9DF0A3FDF7A638509A6761A33FA42FA8DDBD8E16159C93008B53765019C3F0E9F10B144CE2AC57F5D7297F9C9949E4FF68B70D339F87501CE8550B772F32C6DA8AD2CE2100A895D8B08FA1EEAD7C376B407709703C510B50F87E73E43F8E7348F87C3832A547EF2BBE5799ABEDCF5E1F372EA809233F006"),
SHEX("9EDDAB2C9C60B122503C1C30EC6E74050EE13C7E103A05F9ED41D992"));
test_hash(&nettle_sha3_224, /* 247 octets */
SHEX("721645633A44A2C78B19024EAECF58575AB23C27190833C26875DC0F0D50B46AEA9C343D82EA7D5B3E50EC700545C615DAEAEA64726A0F05607576DCD396D812B03FB6551C641087856D050B10E6A4D5577B82A98AFB89CEE8594C9DC19E79FEFF0382FCFD127F1B803A4B9946F4AC9A4378E1E6E041B1389A53E3450CD32D9D2941B0CBABDB50DA8EA2513145164C3AB6BCBD251C448D2D4B087AC57A59C2285D564F16DA4ED5E607ED979592146FFB0EF3F3DB308FB342DF5EB5924A48256FC763141A278814C82D6D6348577545870AE3A83C7230AC02A1540FE1798F7EF09E335A865A2AE0949B21E4F748FB8A51F44750E213A8FB"),
SHEX("54CC87B9655180C0E1C6672350AE1952DDF51EE5D7E215569652AA2E"));
test_hash(&nettle_sha3_224, /* 248 octets */
SHEX("6B860D39725A14B498BB714574B4D37CA787404768F64C648B1751B353AC92BAC2C3A28EA909FDF0423336401A02E63EC24325300D823B6864BB701F9D7C7A1F8EC9D0AE3584AA6DD62EA1997CD831B4BABD9A4DA50932D4EFDA745C61E4130890E156AEE6113716DAF95764222A91187DB2EFFEA49D5D0596102D619BD26A616BBFDA8335505FBB0D90B4C180D1A2335B91538E1668F9F9642790B4E55F9CAB0FE2BDD2935D001EE6419ABAB5457880D0DBFF20ED8758F4C20FE759EFB33141CF0E892587FE8187E5FBC57786B7E8B089612C936DFC03D27EFBBE7C8673F1606BD51D5FF386F4A7AB68EDF59F385EB1291F117BFE717399"),
SHEX("4629C97F9BA98698E0DDECA5E0A3B6DE210EA9E84BF942C2CCF4EC68"));
test_hash(&nettle_sha3_224, /* 249 octets */
SHEX("6A01830AF3889A25183244DECB508BD01253D5B508AB490D3124AFBF42626B2E70894E9B562B288D0A2450CFACF14A0DDAE5C04716E5A0082C33981F6037D23D5E045EE1EF2283FB8B6378A914C5D9441627A722C282FF452E25A7EA608D69CEE4393A0725D17963D0342684F255496D8A18C2961145315130549311FC07F0312FB78E6077334F87EAA873BEE8AA95698996EB21375EB2B4EF53C14401207DEB4568398E5DD9A7CF97E8C9663E23334B46912F8344C19EFCF8C2BA6F04325F1A27E062B62A58D0766FC6DB4D2C6A1928604B0175D872D16B7908EBC041761187CC785526C2A3873FEAC3A642BB39F5351550AF9770C328AF7B"),
SHEX("F45034AA94C1A2686EB849EF4262F2F5BA9ACDD0E8EA32401E060B43"));
test_hash(&nettle_sha3_224, /* 250 octets */
SHEX("B3C5E74B69933C2533106C563B4CA20238F2B6E675E8681E34A389894785BDADE59652D4A73D80A5C85BD454FD1E9FFDAD1C3815F5038E9EF432AAC5C3C4FE840CC370CF86580A6011778BBEDAF511A51B56D1A2EB68394AA299E26DA9ADA6A2F39B9FAFF7FBA457689B9C1A577B2A1E505FDF75C7A0A64B1DF81B3A356001BF0DF4E02A1FC59F651C9D585EC6224BB279C6BEBA2966E8882D68376081B987468E7AED1EF90EBD090AE825795CDCA1B4F09A979C8DFC21A48D8A53CDBB26C4DB547FC06EFE2F9850EDD2685A4661CB4911F165D4B63EF25B87D0A96D3DFF6AB0758999AAD214D07BD4F133A6734FDE445FE474711B69A98F7E2B"),
SHEX("62153F592C49D3C0485F80073319049A510C730327940CD9D52F3698"));
test_hash(&nettle_sha3_224, /* 251 octets */
SHEX("83AF34279CCB5430FEBEC07A81950D30F4B66F484826AFEE7456F0071A51E1BBC55570B5CC7EC6F9309C17BF5BEFDD7C6BA6E968CF218A2B34BD5CF927AB846E38A40BBD81759E9E33381016A755F699DF35D660007B5EADF292FEEFB735207EBF70B5BD17834F7BFA0E16CB219AD4AF524AB1EA37334AA66435E5D397FC0A065C411EBBCE32C240B90476D307CE802EC82C1C49BC1BEC48C0675EC2A6C6F3ED3E5B741D13437095707C565E10D8A20B8C20468FF9514FCF31B4249CD82DCEE58C0A2AF538B291A87E3390D737191A07484A5D3F3FB8C8F15CE056E5E5F8FEBE5E1FB59D6740980AA06CA8A0C20F5712B4CDE5D032E92AB89F0AE1"),
SHEX("ECDE4D6EB0CF28010B45D0D310E7D05F08B80AFC44B8A359BE7E1923"));
test_hash(&nettle_sha3_224, /* 252 octets */
SHEX("A7ED84749CCC56BB1DFBA57119D279D412B8A986886D810F067AF349E8749E9EA746A60B03742636C464FC1EE233ACC52C1983914692B64309EDFDF29F1AB912EC3E8DA074D3F1D231511F5756F0B6EEAD3E89A6A88FE330A10FACE267BFFBFC3E3090C7FD9A850561F363AD75EA881E7244F80FF55802D5EF7A1A4E7B89FCFA80F16DF54D1B056EE637E6964B9E0FFD15B6196BDD7DB270C56B47251485348E49813B4EB9ED122A01B3EA45AD5E1A929DF61D5C0F3E77E1FDC356B63883A60E9CBB9FC3E00C2F32DBD469659883F690C6772E335F617BC33F161D6F6984252EE12E62B6000AC5231E0C9BC65BE223D8DFD94C5004A101AF9FD6C0FB"),
SHEX("3BFC5018CF15CB88007929924B3E014635EF135C91F9671B29BE8731"));
test_hash(&nettle_sha3_224, /* 253 octets */
SHEX("A6FE30DCFCDA1A329E82AB50E32B5F50EB25C873C5D2305860A835AECEE6264AA36A47429922C4B8B3AFD00DA16035830EDB897831C4E7B00F2C23FC0B15FDC30D85FB70C30C431C638E1A25B51CAF1D7E8B050B7F89BFB30F59F0F20FECFF3D639ABC4255B3868FC45DD81E47EB12AB40F2AAC735DF5D1DC1AD997CEFC4D836B854CEE9AC02900036F3867FE0D84AFFF37BDE3308C2206C62C4743375094108877C73B87B2546FE05EA137BEDFC06A2796274099A0D554DA8F7D7223A48CBF31B7DECAA1EBC8B145763E3673168C1B1B715C1CD99ECD3DDB238B06049885ECAD9347C2436DFF32C771F34A38587A44A82C5D3D137A03CAA27E66C8FF6"),
SHEX("22715559AD15717722B1FA0583996090C79C3DF16CC1E6E0F6D3E898"));
test_hash(&nettle_sha3_224, /* 254 octets */
SHEX("83167FF53704C3AA19E9FB3303539759C46DD4091A52DDAE9AD86408B69335989E61414BC20AB4D01220E35241EFF5C9522B079FBA597674C8D716FE441E566110B6211531CECCF8FD06BC8E511D00785E57788ED9A1C5C73524F01830D2E1148C92D0EDC97113E3B7B5CD3049627ABDB8B39DD4D6890E0EE91993F92B03354A88F52251C546E64434D9C3D74544F23FB93E5A2D2F1FB15545B4E1367C97335B0291944C8B730AD3D4789273FA44FB98D78A36C3C3764ABEEAC7C569C1E43A352E5B770C3504F87090DEE075A1C4C85C0C39CF421BDCC615F9EFF6CB4FE6468004AECE5F30E1ECC6DB22AD9939BB2B0CCC96521DFBF4AE008B5B46BC006E"),
SHEX("2F36FF8AB7264F7A5766DE025018E19B5A64D90994B743B8FBFBDCCA"));
test_hash(&nettle_sha3_224, /* 255 octets */
SHEX("3A3A819C48EFDE2AD914FBF00E18AB6BC4F14513AB27D0C178A188B61431E7F5623CB66B23346775D386B50E982C493ADBBFC54B9A3CD383382336A1A0B2150A15358F336D03AE18F666C7573D55C4FD181C29E6CCFDE63EA35F0ADF5885CFC0A3D84A2B2E4DD24496DB789E663170CEF74798AA1BBCD4574EA0BBA40489D764B2F83AADC66B148B4A0CD95246C127D5871C4F11418690A5DDF01246A0C80A43C70088B6183639DCFDA4125BD113A8F49EE23ED306FAAC576C3FB0C1E256671D817FC2534A52F5B439F72E424DE376F4C565CCA82307DD9EF76DA5B7C4EB7E085172E328807C02D011FFBF33785378D79DC266F6A5BE6BB0E4A92ECEEBAEB1"),
SHEX("5AF56987EA9CF11FCD0EAC5EBC14B037365E9B1123E31CB2DFC7929A"));
}
| gpl-2.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | linux-4.14/drivers/scsi/hosts.c | 10 | 16526 | /*
* hosts.c Copyright (C) 1992 Drew Eckhardt
* Copyright (C) 1993, 1994, 1995 Eric Youngdale
* Copyright (C) 2002-2003 Christoph Hellwig
*
* mid to lowlevel SCSI driver interface
* Initial versions: Drew Eckhardt
* Subsequent revisions: Eric Youngdale
*
* <drew@colorado.edu>
*
* Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
* Added QLOGIC QLA1280 SCSI controller kernel host support.
* August 4, 1999 Fred Lewis, Intel DuPont
*
* Updated to reflect the new initialization scheme for the higher
* level of scsi drivers (sd/sr/st)
* September 17, 2000 Torben Mathiasen <tmm@image.dk>
*
* Restructured scsi_host lists and associated functions.
* September 04, 2002 Mike Anderson (andmike@us.ibm.com)
*/
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
static DEFINE_IDA(host_index_ida);
static void scsi_host_cls_release(struct device *dev)
{
put_device(&class_to_shost(dev)->shost_gendev);
}
static struct class shost_class = {
.name = "scsi_host",
.dev_release = scsi_host_cls_release,
};
/**
* scsi_host_set_state - Take the given host through the host state model.
* @shost: scsi host to change the state of.
* @state: state to change to.
*
* Returns zero if successful or an error if the requested
* transition is illegal.
**/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{
enum scsi_host_state oldstate = shost->shost_state;
if (state == oldstate)
return 0;
switch (state) {
case SHOST_CREATED:
/* There are no legal states that come back to
* created. This is the manually initialised start
* state */
goto illegal;
case SHOST_RUNNING:
switch (oldstate) {
case SHOST_CREATED:
case SHOST_RECOVERY:
break;
default:
goto illegal;
}
break;
case SHOST_RECOVERY:
switch (oldstate) {
case SHOST_RUNNING:
break;
default:
goto illegal;
}
break;
case SHOST_CANCEL:
switch (oldstate) {
case SHOST_CREATED:
case SHOST_RUNNING:
case SHOST_CANCEL_RECOVERY:
break;
default:
goto illegal;
}
break;
case SHOST_DEL:
switch (oldstate) {
case SHOST_CANCEL:
case SHOST_DEL_RECOVERY:
break;
default:
goto illegal;
}
break;
case SHOST_CANCEL_RECOVERY:
switch (oldstate) {
case SHOST_CANCEL:
case SHOST_RECOVERY:
break;
default:
goto illegal;
}
break;
case SHOST_DEL_RECOVERY:
switch (oldstate) {
case SHOST_CANCEL_RECOVERY:
break;
default:
goto illegal;
}
break;
}
shost->shost_state = state;
return 0;
illegal:
SCSI_LOG_ERROR_RECOVERY(1,
shost_printk(KERN_ERR, shost,
"Illegal host state transition"
"%s->%s\n",
scsi_host_state_name(oldstate),
scsi_host_state_name(state)));
return -EINVAL;
}
EXPORT_SYMBOL(scsi_host_set_state);
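/*
 * Illustrative sketch (not part of the original file): the transition
 * sequence a typical removal path drives through the state model
 * above, mirroring what scsi_remove_host() does further down.  The
 * function name is hypothetical; the SHOST_* states and the locking
 * are the ones this file already uses.
 */
#if 0 /* example only */
static void example_cancel_host(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	/* RUNNING -> CANCEL is legal; if recovery is in progress,
	 * CANCEL fails and CANCEL_RECOVERY is tried instead. */
	if (scsi_host_set_state(shost, SHOST_CANCEL))
		scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
#endif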
/**
* scsi_remove_host - remove a scsi host
* @shost: a pointer to a scsi host to remove
**/
void scsi_remove_host(struct Scsi_Host *shost)
{
unsigned long flags;
mutex_lock(&shost->scan_mutex);
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_CANCEL))
if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
spin_unlock_irqrestore(shost->host_lock, flags);
mutex_unlock(&shost->scan_mutex);
return;
}
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_autopm_get_host(shost);
flush_workqueue(shost->tmf_work_q);
scsi_forget_host(shost);
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
spin_unlock_irqrestore(shost->host_lock, flags);
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);
/**
* scsi_add_host_with_dma - add a scsi host with dma device
* @shost: scsi host pointer to add
* @dev: a struct device of type scsi class
* @dma_dev: dma device for the host
*
* Note: You rarely need to worry about this unless you're in a
* virtualised host environment, so use the simpler scsi_add_host()
* function instead.
*
* Return value:
* 0 on success / != 0 for error
**/
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
struct device *dma_dev)
{
struct scsi_host_template *sht = shost->hostt;
int error = -EINVAL;
shost_printk(KERN_INFO, shost, "%s\n",
sht->info ? sht->info(shost) : sht->name);
if (!shost->can_queue) {
shost_printk(KERN_ERR, shost,
"can_queue = 0 no longer supported\n");
goto fail;
}
error = scsi_init_sense_cache(shost);
if (error)
goto fail;
if (shost_use_blk_mq(shost)) {
error = scsi_mq_setup_tags(shost);
if (error)
goto fail;
} else {
shost->bqt = blk_init_tags(shost->can_queue,
shost->hostt->tag_alloc_policy);
if (!shost->bqt) {
error = -ENOMEM;
goto fail;
}
}
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
if (!dma_dev)
dma_dev = shost->shost_gendev.parent;
shost->dma_dev = dma_dev;
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
* nothing else preventing suspending the device.
*/
pm_runtime_get_noresume(&shost->shost_gendev);
pm_runtime_set_active(&shost->shost_gendev);
pm_runtime_enable(&shost->shost_gendev);
device_enable_async_suspend(&shost->shost_gendev);
error = device_add(&shost->shost_gendev);
if (error)
goto out_disable_runtime_pm;
scsi_host_set_state(shost, SHOST_RUNNING);
get_device(shost->shost_gendev.parent);
device_enable_async_suspend(&shost->shost_dev);
error = device_add(&shost->shost_dev);
if (error)
goto out_del_gendev;
get_device(&shost->shost_gendev);
if (shost->transportt->host_size) {
shost->shost_data = kzalloc(shost->transportt->host_size,
GFP_KERNEL);
if (shost->shost_data == NULL) {
error = -ENOMEM;
goto out_del_dev;
}
}
if (shost->transportt->create_work_queue) {
snprintf(shost->work_q_name, sizeof(shost->work_q_name),
"scsi_wq_%d", shost->host_no);
shost->work_q = create_singlethread_workqueue(
shost->work_q_name);
if (!shost->work_q) {
error = -EINVAL;
goto out_free_shost_data;
}
}
error = scsi_sysfs_add_host(shost);
if (error)
goto out_destroy_host;
scsi_proc_host_add(shost);
scsi_autopm_put_host(shost);
return error;
out_destroy_host:
if (shost->work_q)
destroy_workqueue(shost->work_q);
out_free_shost_data:
kfree(shost->shost_data);
out_del_dev:
device_del(&shost->shost_dev);
out_del_gendev:
device_del(&shost->shost_gendev);
out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
if (shost_use_blk_mq(shost))
scsi_mq_destroy_tags(shost);
fail:
return error;
}
EXPORT_SYMBOL(scsi_add_host_with_dma);
static void scsi_host_dev_release(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
scsi_proc_hostdir_rm(shost->hostt);
/* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
rcu_barrier();
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
kthread_stop(shost->ehandler);
if (shost->work_q)
destroy_workqueue(shost->work_q);
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
* and scsi_host_put() have been called but neither
* scsi_add_host() nor scsi_remove_host() has been called.
* This avoids leaking the memory allocated for the shost_dev
* name.
*/
kfree(dev_name(&shost->shost_dev));
}
if (shost_use_blk_mq(shost)) {
if (shost->tag_set.tags)
scsi_mq_destroy_tags(shost);
} else {
if (shost->bqt)
blk_free_tags(shost->bqt);
}
kfree(shost->shost_data);
ida_simple_remove(&host_index_ida, shost->host_no);
if (parent)
put_device(parent);
kfree(shost);
}
static int shost_eh_deadline = -1;
module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
"SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
static struct device_type scsi_host_type = {
.name = "scsi_host",
.release = scsi_host_dev_release,
};
/**
* scsi_host_alloc - register a scsi host adapter instance.
* @sht: pointer to scsi host template
* @privsize: extra bytes to allocate for driver
*
* Note:
* Allocate a new Scsi_Host and perform basic initialization.
* The host is not published to the scsi midlayer until scsi_add_host
* is called.
*
* Return value:
* Pointer to a new Scsi_Host
**/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
gfp_t gfp_mask = GFP_KERNEL;
int index;
if (sht->unchecked_isa_dma && privsize)
gfp_mask |= __GFP_DMA;
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
if (!shost)
return NULL;
shost->host_lock = &shost->default_lock;
spin_lock_init(shost->host_lock);
shost->shost_state = SHOST_CREATED;
INIT_LIST_HEAD(&shost->__devices);
INIT_LIST_HEAD(&shost->__targets);
INIT_LIST_HEAD(&shost->eh_cmd_q);
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
goto fail_kfree;
shost->host_no = index;
shost->dma_channel = 0xff;
/* These three are default values which can be overridden */
shost->max_channel = 0;
shost->max_id = 8;
shost->max_lun = 8;
/* Give each shost a default transportt */
shost->transportt = &blank_transport_template;
/*
* All drivers right now should be able to handle 12 byte
* commands. Every so often there are requests for 16 byte
* commands, but individual low-level drivers need to certify that
* they actually do something sensible with such commands.
*/
shost->max_cmd_len = 12;
shost->hostt = sht;
shost->this_id = sht->this_id;
shost->can_queue = sht->can_queue;
shost->sg_tablesize = sht->sg_tablesize;
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
shost->cmd_per_lun = sht->cmd_per_lun;
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->use_clustering = sht->use_clustering;
shost->no_write_same = sht->no_write_same;
if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
shost->eh_deadline = -1;
else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
shost_printk(KERN_WARNING, shost,
"eh_deadline %u too large, setting to %u\n",
shost_eh_deadline, INT_MAX / HZ);
shost->eh_deadline = INT_MAX;
} else
shost->eh_deadline = shost_eh_deadline * HZ;
if (sht->supported_mode == MODE_UNKNOWN)
/* means we didn't set it ... default to INITIATOR */
shost->active_mode = MODE_INITIATOR;
else
shost->active_mode = sht->supported_mode;
if (sht->max_host_blocked)
shost->max_host_blocked = sht->max_host_blocked;
else
shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;
/*
* If the driver imposes no hard sector transfer limit, start at
* machine infinity initially.
*/
if (sht->max_sectors)
shost->max_sectors = sht->max_sectors;
else
shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
/*
* assume a 4GB boundary, if not set
*/
if (sht->dma_boundary)
shost->dma_boundary = sht->dma_boundary;
else
shost->dma_boundary = 0xffffffff;
shost->use_blk_mq = scsi_use_blk_mq;
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
shost->shost_gendev.type = &scsi_host_type;
device_initialize(&shost->shost_dev);
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
if (IS_ERR(shost->ehandler)) {
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
goto fail_index_remove;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
WQ_UNBOUND | WQ_MEM_RECLAIM,
1, shost->host_no);
if (!shost->tmf_work_q) {
shost_printk(KERN_WARNING, shost,
"failed to create tmf workq\n");
goto fail_kthread;
}
scsi_proc_hostdir_add(shost->hostt);
return shost;
fail_kthread:
kthread_stop(shost->ehandler);
fail_index_remove:
ida_simple_remove(&host_index_ida, shost->host_no);
fail_kfree:
kfree(shost);
return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
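/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate/add/scan sequence a low-level driver's probe path follows
 * with the functions above.  "example_probe", "example_template" and
 * "privsize" are placeholders for the driver's own names; the
 * scsi_add_host() and scsi_scan_host() calls are the normal public
 * wrappers around scsi_add_host_with_dma() and the scanning code.
 */
#if 0 /* example only */
static int example_probe(struct device *dev,
			 struct scsi_host_template *example_template,
			 int privsize)
{
	struct Scsi_Host *shost = scsi_host_alloc(example_template, privsize);
	int error;

	if (!shost)
		return -ENOMEM;
	error = scsi_add_host(shost, dev);
	if (error) {
		scsi_host_put(shost);
		return error;
	}
	scsi_scan_host(shost);
	return 0;
}
#endif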
struct Scsi_Host *scsi_register(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost = scsi_host_alloc(sht, privsize);
if (!sht->detect) {
printk(KERN_WARNING "scsi_register() called on new-style "
"template for driver %s\n", sht->name);
dump_stack();
}
if (shost)
list_add_tail(&shost->sht_legacy_list, &sht->legacy_hosts);
return shost;
}
EXPORT_SYMBOL(scsi_register);
void scsi_unregister(struct Scsi_Host *shost)
{
list_del(&shost->sht_legacy_list);
scsi_host_put(shost);
}
EXPORT_SYMBOL(scsi_unregister);
static int __scsi_host_match(struct device *dev, const void *data)
{
struct Scsi_Host *p;
const unsigned short *hostnum = data;
p = class_to_shost(dev);
return p->host_no == *hostnum;
}
/**
* scsi_host_lookup - get a reference to a Scsi_Host by host no
* @hostnum: host number to locate
*
* Return value:
* A pointer to located Scsi_Host or NULL.
*
* The caller must do a scsi_host_put() to drop the reference
* that scsi_host_get() took. The put_device() below dropped
* the reference from class_find_device().
**/
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
struct device *cdev;
struct Scsi_Host *shost = NULL;
cdev = class_find_device(&shost_class, NULL, &hostnum,
__scsi_host_match);
if (cdev) {
shost = scsi_host_get(class_to_shost(cdev));
put_device(cdev);
}
return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);
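/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to balance the reference that scsi_host_lookup() returns,
 * as the comment above requires.  Host number 0 and the function name
 * are examples only.
 */
#if 0 /* example only */
static void example_use_host_zero(void)
{
	struct Scsi_Host *shost = scsi_host_lookup(0);

	if (shost) {
		shost_printk(KERN_INFO, shost, "found host%d\n",
			     shost->host_no);
		scsi_host_put(shost);	/* drop the lookup reference */
	}
}
#endif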
/**
* scsi_host_get - inc a Scsi_Host ref count
* @shost: Pointer to Scsi_Host to inc.
**/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
if ((shost->shost_state == SHOST_DEL) ||
!get_device(&shost->shost_gendev))
return NULL;
return shost;
}
EXPORT_SYMBOL(scsi_host_get);
/**
* scsi_host_put - dec a Scsi_Host ref count
* @shost: Pointer to Scsi_Host to dec.
**/
void scsi_host_put(struct Scsi_Host *shost)
{
put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);
int scsi_init_hosts(void)
{
return class_register(&shost_class);
}
void scsi_exit_hosts(void)
{
class_unregister(&shost_class);
ida_destroy(&host_index_ida);
}
int scsi_is_host_device(const struct device *dev)
{
return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);
/**
* scsi_queue_work - Queue work to the Scsi_Host workqueue.
* @shost: Pointer to Scsi_Host.
* @work: Work to queue for execution.
*
* Return value:
* 1 - work queued for execution
* 0 - work is already queued
* -EINVAL - work queue doesn't exist
**/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
if (unlikely(!shost->work_q)) {
shost_printk(KERN_ERR, shost,
"ERROR: Scsi host '%s' attempted to queue scsi-work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
return -EINVAL;
}
return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);
/**
* scsi_flush_work - Flush a Scsi_Host's workqueue.
* @shost: Pointer to Scsi_Host.
**/
void scsi_flush_work(struct Scsi_Host *shost)
{
if (!shost->work_q) {
shost_printk(KERN_ERR, shost,
"ERROR: Scsi host '%s' attempted to flush scsi-work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
return;
}
flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
| gpl-2.0 |
martbhell/slurm | src/common/plugrack.c | 10 | 16673 | /*****************************************************************************\
* plugrack.c - an intelligent container for plugins
*****************************************************************************
* Copyright (C) 2002-2007 The Regents of the University of California.
* Copyright (C) 2008-2009 Lawrence Livermore National Security.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Jay Windley <jwindley@lnxi.com>.
* CODE-OCEC-09-009. All rights reserved.
*
* This file is part of SLURM, a resource management program.
* For details, see <http://slurm.schedmd.com/>.
* Please also read the included file: DISCLAIMER.
*
* SLURM is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* In addition, as a special exception, the copyright holders give permission
* to link the code of portions of this program with the OpenSSL library under
* certain conditions as described in each individual source file, and
* distribute linked combinations including the two. You must obey the GNU
* General Public License in all respects for all of the code used other than
* OpenSSL. If you modify file(s) with this exception, you may extend this
* exception to your version of the file(s), but you are not obligated to do
* so. If you do not wish to do so, delete this exception statement from your
* version. If you delete this exception statement from all source files in
* the program, then also delete it here.
*
* SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along
* with SLURM; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
\*****************************************************************************/
#if HAVE_CONFIG_H
# include "config.h"
# if HAVE_DIRENT_H
# include <dirent.h>
# define NAMLEN(dirent) strlen((dirent)->d_name)
# else /* ! HAVE_DIRENT_H */
# define dirent direct
# define NAMLEN(dirent) (dirent)->d_namlen
# endif /* HAVE_DIRENT_H */
# if STDC_HEADERS
# include <string.h>
# else /* ! STDC_HEADERS */
# if !HAVE_STRCHR
# define strchr index
# define strrchr rindex
char *strchr(), *strrchr();
# endif /* HAVE_STRCHR */
# endif /* STDC_HEADERS */
# if HAVE_UNISTD_H
# include <unistd.h>
# endif /* HAVE_UNISTD_H */
# if HAVE_SYS_TYPES_H
# include <sys/types.h>
# endif
# if HAVE_SYS_STAT_H
# include <sys/stat.h>
# endif
# if HAVE_STDLIB_H
# include <stdlib.h>
# endif
#else /* ! HAVE_CONFIG_H */
# include <dirent.h>
# include <string.h>
# include <stdlib.h>
# include <unistd.h>
# include <dirent.h>
# include <sys/types.h>
# include <sys/stat.h>
#endif /* HAVE_CONFIG_H */
#include "src/common/macros.h"
#include "src/common/xassert.h"
#include "src/common/xmalloc.h"
#include "src/common/xstring.h"
#include "src/common/plugrack.h"
strong_alias(plugrack_create, slurm_plugrack_create);
strong_alias(plugrack_destroy, slurm_plugrack_destroy);
strong_alias(plugrack_read_dir, slurm_plugrack_read_dir);
strong_alias(plugrack_set_major_type, slurm_plugrack_set_major_type);
strong_alias(plugrack_set_paranoia, slurm_plugrack_set_paranoia);
strong_alias(plugrack_use_by_type, slurm_plugrack_use_by_type);
/*
* Represents a plugin in the rack.
*
* full_type is the fully-qualified plugin type, e.g., "auth/kerberos".
* For the low-level plugin interface the type can be whatever it needs
* to be. For the rack-level interface, the type exported by the plugin
* must be of the form "<major>/<minor>".
*
* fq_path is the fully-qualified pathname to the plugin.
*
* plug is the plugin handle. If it is equal to PLUGIN_INVALID_HANDLE
* then the plugin is not currently loaded in memory.
*
* refcount shows how many clients have requested to use the plugin.
* If this is zero, the rack code may decide to unload the plugin.
*/
typedef struct _plugrack_entry {
const char *full_type;
const char *fq_path;
plugin_handle_t plug;
int refcount;
} plugrack_entry_t;
/*
* Implementation of the plugin rack.
*
* entries is the list of plugrack_entry_t.
*
* uid is the Linux UID of the person authorized to own the plugin
* and write to the plugin file and the directory where it is stored.
* This field is used only if paranoia is nonzero.
*
* paranoia is a set of bit flags indicating what operations should be
* done to verify the integrity and authority of the plugin before
* loading it.
*/
struct _plugrack {
List entries;
const char *major_type;
uid_t uid;
uint8_t paranoia;
};
#define PLUGRACK_UID_NOBODY 99 /* RedHat's, anyway. */
static bool _match_major ( const char *path_name, const char *major_type );
static int _plugrack_read_single_dir( plugrack_t rack, char *dir );
static bool _so_file( char *pathname );
/*
* Destructor function for the List code. This should entirely
* clean up a plugin_entry_t.
*/
static void
plugrack_entry_destructor( void *v )
{
plugrack_entry_t *victim = v;
if ( victim == NULL )
return;
/*
* Free memory and unload the plugin if necessary. The assert
* is to make sure we were actually called from the List destructor
* which should only be callable from plugrack_destroy().
*/
xassert( victim->refcount == 0 );
xfree( victim->full_type );
xfree( victim->fq_path );
if ( victim->plug != PLUGIN_INVALID_HANDLE )
plugin_unload( victim->plug );
xfree( victim );
}
/*
* Check a pathname to see if it is owned and writable by the appropriate
* users, and writable by no one else. The path can be either to a file
* or to a directory. This is so, when fishing for plugins in a whole
* directory, we can test the directory once and then each file.
*
* Returns non-zero if the file system node indicated by the path name
* is owned by the user in the plugin rack and not writable by anyone
* else, and these actions are requested by the rack's paranoia policy.
*/
static int
accept_path_paranoia( plugrack_t rack,
const char *fq_path,
int check_own,
int check_write )
{
struct stat st;
/* Internal function, so assert rather than fail gracefully. */
xassert( rack );
xassert( fq_path );
if ( stat( fq_path, &st ) < 0 ) {
debug3( "accept_path_paranoia: stat(%s) failed", fq_path );
return 0;
}
/* Is path owned by authorized user? */
if ( check_own ) {
if ( st.st_uid != rack->uid ) {
debug3( "accept_path_paranoia: %s not owned by "
"proper user", fq_path );
return 0;
}
}
/* Is path writable by others? */
if ( check_write ) {
if ( ( st.st_mode & S_IWGRP ) || ( st.st_mode & S_IWOTH ) ) {
debug3( "accept_path_paranoia: %s writable by others",
fq_path );
return 0;
}
}
return 1;
}
plugrack_t plugrack_create( void )
{
plugrack_t rack = (plugrack_t) xmalloc( sizeof( struct _plugrack ) );
rack->paranoia = PLUGRACK_PARANOIA_NONE;
rack->major_type = NULL;
rack->uid = PLUGRACK_UID_NOBODY;
rack->entries = list_create( plugrack_entry_destructor );
if ( rack->entries == NULL ) {
xfree( rack );
return NULL;
}
return rack;
}
int
plugrack_destroy( plugrack_t rack )
{
ListIterator it;
plugrack_entry_t *e;
if ( ! rack )
return SLURM_ERROR;
/*
* See if there are any plugins still being used. If we unload them,
* the program might crash because cached virtual mapped addresses
* will suddenly be outside our virtual address space.
*/
it = list_iterator_create( rack->entries );
while ( ( e = list_next( it ) ) != NULL ) {
if ( e->refcount > 0 ) {
debug2( "plugrack_destroy: attempt to destroy "
"plugin rack that is still in use" );
list_iterator_destroy( it );
return SLURM_ERROR; /* plugins still in use. */
}
}
list_iterator_destroy( it );
FREE_NULL_LIST( rack->entries );
xfree( rack->major_type );
xfree( rack );
return SLURM_SUCCESS;
}
int
plugrack_set_major_type( plugrack_t rack, const char *type )
{
if ( ! rack )
return SLURM_ERROR;
if ( ! type )
return SLURM_ERROR;
/* Free any pre-existing type. */
xfree( rack->major_type );
/* Install a new one. */
if ( type != NULL ) {
rack->major_type = xstrdup( type );
if ( rack->major_type == NULL ) {
debug3( "plugrack_set_major_type: unable to set type");
return SLURM_ERROR;
}
}
return SLURM_SUCCESS;
}
int
plugrack_set_paranoia( plugrack_t rack,
const uint32_t flags,
const uid_t uid )
{
if ( ! rack )
return SLURM_ERROR;
rack->paranoia = flags;
if ( flags ) {
rack->uid = uid;
}
return SLURM_SUCCESS;
}
static int
plugrack_add_plugin_path( plugrack_t rack,
const char *full_type,
const char *fq_path )
{
plugrack_entry_t *e;
if ( ( ! rack ) || ( ! fq_path ) )
return SLURM_ERROR;
e = (plugrack_entry_t *) xmalloc( sizeof( plugrack_entry_t ) );
e->full_type = xstrdup( full_type );
e->fq_path = xstrdup( fq_path );
e->plug = PLUGIN_INVALID_HANDLE;
e->refcount = 0;
list_append( rack->entries, e );
return SLURM_SUCCESS;
}
/* test for the plugin in the various colon separated directories */
int
plugrack_read_dir( plugrack_t rack, const char *dir )
{
char *head, *dir_array;
int i, rc = SLURM_SUCCESS;
if ( ( ! rack ) || ( ! dir ) )
return SLURM_ERROR;
dir_array = xstrdup(dir);
head = dir_array;
for (i=0; ; i++) {
if (dir_array[i] == '\0') {
if ( _plugrack_read_single_dir( rack, head ) ==
SLURM_ERROR)
rc = SLURM_ERROR;
break;
}
else if (dir_array[i] == ':') {
dir_array[i] = '\0';
if ( _plugrack_read_single_dir( rack, head ) ==
SLURM_ERROR)
rc = SLURM_ERROR;
head = dir_array + i + 1;
}
}
xfree( dir_array );
return rc;
}
static int
_plugrack_read_single_dir( plugrack_t rack, char *dir )
{
char *fq_path;
char *tail;
DIR *dirp;
struct dirent *e;
struct stat st;
static const size_t type_len = 64;
char plugin_type[ type_len ];
static int max_path_len = 0;
/* Allocate a buffer for fully-qualified path names. */
if (max_path_len == 0) {
max_path_len = pathconf("/", _PC_NAME_MAX);
if (max_path_len <= 0)
max_path_len = 256;
}
fq_path = xmalloc( strlen( dir ) + max_path_len + 1 );
xassert( fq_path );
/*
* Write the directory name in it, then a separator, then
* keep track of where we want to write the individual file
* names.
*/
strcpy( fq_path, dir );
tail = &fq_path[ strlen( dir ) ];
*tail = '/';
++tail;
/* Check whether we should be paranoid about this directory. */
if ( ! accept_path_paranoia( rack,
dir,
rack->paranoia &
PLUGRACK_PARANOIA_DIR_OWN,
rack->paranoia &
PLUGRACK_PARANOIA_DIR_WRITABLE ) ) {
xfree( fq_path );
return SLURM_ERROR;
}
/* Open the directory. */
dirp = opendir( dir );
if ( dirp == NULL ) {
error( "cannot open plugin directory %s", dir );
xfree( fq_path );
return SLURM_ERROR;
}
while ( 1 ) {
e = readdir( dirp );
if ( e == NULL )
break;
/*
* Compose file name. Where NAME_MAX is defined it represents
* the largest file name given in a dirent. This macro is used
* in the allocation of "tail" above, so this unbounded copy
* should work.
*/
strcpy( tail, e->d_name );
/* Check only regular files. */
if ( (strncmp(e->d_name, ".", 1) == 0) ||
(stat( fq_path, &st ) < 0) ||
(! S_ISREG(st.st_mode)) )
continue;
/* Check only shared object files */
if (! _so_file(e->d_name))
continue;
/* file's prefix must match specified major_type
* to avoid having some program try to open a
* plugin designed for a different program and
* discovering undefined symbols */
if ((rack->major_type) &&
(!_match_major(e->d_name, rack->major_type)))
continue;
/* See if we should be paranoid about this file. */
if (!accept_path_paranoia( rack,
fq_path,
rack->paranoia &
PLUGRACK_PARANOIA_FILE_OWN,
rack->paranoia &
PLUGRACK_PARANOIA_FILE_WRITABLE )) {
debug3( "plugin_read_dir: skipping %s for security "
"reasons", fq_path );
continue;
}
/* Test the type. */
if ( plugin_peek( fq_path,
plugin_type,
type_len,
NULL ) == SLURM_ERROR ) {
continue;
}
if ( rack->major_type &&
( strncmp( rack->major_type,
plugin_type,
strlen( rack->major_type ) ) != 0 ) ) {
continue;
}
/* Add it to the list. */
(void) plugrack_add_plugin_path( rack, plugin_type, fq_path );
}
closedir( dirp );
xfree( fq_path );
return SLURM_SUCCESS;
}
/* Return TRUE if the specified pathname is recognized as that of a shared
* object (i.e. containing ".so\0") */
static bool
_so_file ( char *file_name )
{
int i;
if (file_name == NULL)
return false;
for (i=0; file_name[i] ;i++) {
if ( (file_name[i] == '.') && (file_name[i+1] == 's') &&
(file_name[i+2] == 'o') && (file_name[i+3] == '\0') )
return true;
}
return false;
}
/* Return TRUE if the specified major_type is a prefix of the shared object
* pathname (i.e. either "<major_name>..." or "lib<major_name>...") */
static bool
_match_major ( const char *path_name, const char *major_type )
{
char *head = (char *)path_name;
/* Special case for BlueGene systems */
if (strncmp(head, "libsched_if", 11) == 0)
return FALSE;
if (strncmp(head, "lib", 3) == 0)
head += 3;
if (strncmp(head, major_type, strlen(major_type)))
return FALSE;
return TRUE;
}
int
plugrack_read_cache( plugrack_t rack,
const char *cache_file )
{
/* Don't care for now. */
return SLURM_ERROR;
}
int
plugrack_purge_idle( plugrack_t rack )
{
ListIterator it;
plugrack_entry_t *e;
if ( ! rack )
return SLURM_ERROR;
it = list_iterator_create( rack->entries );
while ( ( e = list_next( it ) ) != NULL ) {
if ( ( e->plug != PLUGIN_INVALID_HANDLE ) &&
( e->refcount == 0 ) ){
plugin_unload( e->plug );
e->plug = PLUGIN_INVALID_HANDLE;
}
}
list_iterator_destroy( it );
return SLURM_SUCCESS;
}
int
plugrack_load_all( plugrack_t rack )
{
ListIterator it;
plugrack_entry_t *e;
if ( ! rack )
return SLURM_ERROR;
it = list_iterator_create( rack->entries );
while ( ( e = list_next( it ) ) != NULL ) {
if ( e->plug == PLUGIN_INVALID_HANDLE ) {
plugin_load_from_file(&e->plug, e->fq_path);
}
}
list_iterator_destroy( it );
return SLURM_SUCCESS;
}
int
plugrack_write_cache( plugrack_t rack,
const char *cache )
{
/* Not implemented. */
return SLURM_SUCCESS;
}
plugin_handle_t
plugrack_use_by_type( plugrack_t rack,
const char *full_type )
{
ListIterator it;
plugrack_entry_t *e;
if ( (!rack) || (!full_type) )
return PLUGIN_INVALID_HANDLE;
it = list_iterator_create(rack->entries);
while ((e = list_next(it))) {
plugin_err_t err;
if (strcmp(full_type, e->full_type) != 0)
continue;
/* See if plugin is loaded. */
if (e->plug == PLUGIN_INVALID_HANDLE &&
(err = plugin_load_from_file(&e->plug, e->fq_path)))
error ("%s: %s", e->fq_path, plugin_strerror (err));
/* If load was successful, increment the reference count. */
if (e->plug != PLUGIN_INVALID_HANDLE)
e->refcount++;
/*
* Return the plugin, even if it failed to load -- this serves
* as an error return value.
*/
list_iterator_destroy(it);
return e->plug;
}
/* Couldn't find a suitable plugin. */
list_iterator_destroy(it);
return PLUGIN_INVALID_HANDLE;
}
int
plugrack_finished_with_plugin( plugrack_t rack, plugin_handle_t plug )
{
ListIterator it;
plugrack_entry_t *e;
if ( ! rack )
return SLURM_ERROR;
it = list_iterator_create( rack->entries );
while ( ( e = list_next( it ) ) != NULL ) {
if ( e->plug == plug ) {
e->refcount--;
if ( e->refcount < 0 )
e->refcount = 0;
/* Do something here with purge policy. */
list_iterator_destroy( it );
return SLURM_SUCCESS;
}
}
/* Plugin not in this rack. */
list_iterator_destroy( it );
return SLURM_ERROR;
}
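/*
 * Illustrative sketch (not part of the original file): the typical
 * lifecycle of a rack using the functions above -- create it, give it
 * a major type, scan a colon separated directory list, check a plugin
 * out by its "<major>/<minor>" type, then hand it back before the
 * rack is destroyed.  The directory list and the "auth/munge" type
 * are examples only.
 */
#if 0 /* example only */
static void example_plugrack_lifecycle( void )
{
	plugrack_t rack = plugrack_create();
	plugin_handle_t plug;

	if ( rack == NULL )
		return;
	plugrack_set_major_type( rack, "auth" );
	plugrack_set_paranoia( rack, PLUGRACK_PARANOIA_NONE, 0 );
	plugrack_read_dir( rack, "/usr/lib64/slurm:/usr/lib/slurm" );
	plug = plugrack_use_by_type( rack, "auth/munge" );
	if ( plug != PLUGIN_INVALID_HANDLE )
		plugrack_finished_with_plugin( rack, plug );
	plugrack_destroy( rack );
}
#endif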
int
plugrack_print_all_plugin(plugrack_t rack)
{
ListIterator itr;
plugrack_entry_t *e = NULL;
xassert(rack->entries);
itr = list_iterator_create(rack->entries);
info("MPI types are...");
while ((e = list_next(itr)) != NULL ) {
info("%s", e->full_type);
}
list_iterator_destroy(itr);
return SLURM_SUCCESS;
}
| gpl-2.0 |
nisihara1/q-e | LR_Modules/compute_vsgga.f90 | 10 | 5055 | !
! Copyright (C) 2001-2016 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!
!----------------------------------------------------------------------------
SUBROUTINE compute_vsgga( rhoout, grho, vsgga )
!----------------------------------------------------------------------------
!
USE constants, ONLY : e2
USE kinds, ONLY : DP
USE gvect, ONLY : nl, ngm, g
USE cell_base, ONLY : alat
USE noncollin_module, ONLY : noncolin, nspin_gga
USE funct, ONLY : gcxc, gcx_spin, gcc_spin, &
gcc_spin_more, dft_is_gradient, get_igcc
USE spin_orb, ONLY : domag
USE fft_base, ONLY : dfftp
!
IMPLICIT NONE
!
REAL(DP), INTENT(IN) :: rhoout(dfftp%nnr,nspin_gga)
REAL(DP), INTENT(IN) :: grho(3,dfftp%nnr,nspin_gga)
REAL(DP), INTENT(OUT) :: vsgga(dfftp%nnr)
!
INTEGER :: k, ipol, is
!
REAL(DP), ALLOCATABLE :: h(:,:,:), dh(:)
REAL(DP), ALLOCATABLE :: vaux(:,:)
!
LOGICAL :: igcc_is_lyp
REAL(DP) :: grho2(2), sx, sc, v2c, &
v1xup, v1xdw, v2xup, v2xdw, v1cup, v1cdw , &
arho, zeta, rh, grh2
REAL(DP) :: v2cup, v2cdw, v2cud, rup, rdw, &
grhoup, grhodw, grhoud, grup, grdw
!
REAL(DP), PARAMETER :: vanishing_charge = 1.D-6, &
vanishing_mag = 1.D-12
REAL(DP), PARAMETER :: epsr = 1.D-6, epsg = 1.D-10
!
!
IF ( .NOT. dft_is_gradient() ) RETURN
IF ( .NOT. (noncolin.and.domag) ) &
call errore('compute_vsgga','routine called in the wrong case',1)
igcc_is_lyp = (get_igcc() == 3)
!
ALLOCATE( h( 3, dfftp%nnr, nspin_gga) )
ALLOCATE( vaux( dfftp%nnr, nspin_gga ) )
DO k = 1, dfftp%nnr
!
rh = rhoout(k,1) + rhoout(k,2)
!
arho=abs(rh)
!
IF ( arho > vanishing_charge ) THEN
!
grho2(:) = grho(1,k,:)**2 + grho(2,k,:)**2 + grho(3,k,:)**2
!
IF ( grho2(1) > epsg .OR. grho2(2) > epsg ) THEN
CALL gcx_spin( rhoout(k,1), rhoout(k,2), grho2(1), &
grho2(2), sx, v1xup, v1xdw, v2xup, v2xdw )
!
IF ( igcc_is_lyp ) THEN
!
rup = rhoout(k,1)
rdw = rhoout(k,2)
!
grhoup = grho(1,k,1)**2 + grho(2,k,1)**2 + grho(3,k,1)**2
grhodw = grho(1,k,2)**2 + grho(2,k,2)**2 + grho(3,k,2)**2
!
grhoud = grho(1,k,1) * grho(1,k,2) + &
grho(2,k,1) * grho(2,k,2) + &
grho(3,k,1) * grho(3,k,2)
!
CALL gcc_spin_more( rup, rdw, grhoup, grhodw, grhoud, &
sc, v1cup, v1cdw, v2cup, v2cdw, v2cud )
!
ELSE
!
zeta = ( rhoout(k,1) - rhoout(k,2) ) / rh
!
grh2 = ( grho(1,k,1) + grho(1,k,2) )**2 + &
( grho(2,k,1) + grho(2,k,2) )**2 + &
( grho(3,k,1) + grho(3,k,2) )**2
!
CALL gcc_spin( rh, zeta, grh2, sc, v1cup, v1cdw, v2c )
!
v2cup = v2c
v2cdw = v2c
v2cud = v2c
!
END IF
ELSE
!
sc = 0.D0
sx = 0.D0
v1xup = 0.D0
v1xdw = 0.D0
v2xup = 0.D0
v2xdw = 0.D0
v1cup = 0.D0
v1cdw = 0.D0
v2c = 0.D0
v2cup = 0.D0
v2cdw = 0.D0
v2cud = 0.D0
ENDIF
ELSE
!
sc = 0.D0
sx = 0.D0
v1xup = 0.D0
v1xdw = 0.D0
v2xup = 0.D0
v2xdw = 0.D0
v1cup = 0.D0
v1cdw = 0.D0
v2c = 0.D0
v2cup = 0.D0
v2cdw = 0.D0
v2cud = 0.D0
!
ENDIF
!
! ... first term of the gradient correction : D(rho*Exc)/D(rho)
!
vaux(k,1) = e2 * ( v1xup + v1cup )
vaux(k,2) = e2 * ( v1xdw + v1cdw )
!
! ... h contains D(rho*Exc)/D(|grad rho|) * (grad rho) / |grad rho|
!
DO ipol = 1, 3
!
grup = grho(ipol,k,1)
grdw = grho(ipol,k,2)
h(ipol,k,1) = e2 * ( ( v2xup + v2cup ) * grup + v2cud * grdw )
h(ipol,k,2) = e2 * ( ( v2xdw + v2cdw ) * grdw + v2cud * grup )
!
END DO
!
END DO
!
ALLOCATE( dh( dfftp%nnr ) )
!
! ... second term of the gradient correction :
! ... \sum_alpha (D / D r_alpha) ( D(rho*Exc)/D(grad_alpha rho) )
!
DO is = 1, nspin_gga
!
CALL grad_dot( dfftp%nnr, h(1,1,is), ngm, g, nl, alat, dh )
!
vaux(:,is) = vaux(:,is) - dh(:)
!
END DO
vsgga(:)=(vaux(:,1)-vaux(:,2))
!
DEALLOCATE( dh )
DEALLOCATE( h )
DEALLOCATE( vaux )
!
RETURN
!
END SUBROUTINE compute_vsgga
!
| gpl-2.0 |
acama/binutils | gdb/mt-tdep.c | 10 | 37396 | /* Target-dependent code for Morpho mt processor, for GDB.
Copyright (C) 2005-2015 Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* Contributed by Michael Snyder, msnyder@redhat.com. */
#include "defs.h"
#include "frame.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "symtab.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "gdbtypes.h"
#include "regcache.h"
#include "reggroups.h"
#include "gdbcore.h"
#include "trad-frame.h"
#include "inferior.h"
#include "dwarf2-frame.h"
#include "infcall.h"
#include "language.h"
#include "valprint.h"
enum mt_arch_constants
{
MT_MAX_STRUCT_SIZE = 16
};
enum mt_gdb_regnums
{
MT_R0_REGNUM, /* 32 bit regs. */
MT_R1_REGNUM,
MT_1ST_ARGREG = MT_R1_REGNUM,
MT_R2_REGNUM,
MT_R3_REGNUM,
MT_R4_REGNUM,
MT_LAST_ARGREG = MT_R4_REGNUM,
MT_R5_REGNUM,
MT_R6_REGNUM,
MT_R7_REGNUM,
MT_R8_REGNUM,
MT_R9_REGNUM,
MT_R10_REGNUM,
MT_R11_REGNUM,
MT_R12_REGNUM,
MT_FP_REGNUM = MT_R12_REGNUM,
MT_R13_REGNUM,
MT_SP_REGNUM = MT_R13_REGNUM,
MT_R14_REGNUM,
MT_RA_REGNUM = MT_R14_REGNUM,
MT_R15_REGNUM,
MT_IRA_REGNUM = MT_R15_REGNUM,
MT_PC_REGNUM,
/* Interrupt Enable pseudo-register, exported by SID. */
MT_INT_ENABLE_REGNUM,
/* End of CPU regs. */
MT_NUM_CPU_REGS,
/* Co-processor registers. */
MT_COPRO_REGNUM = MT_NUM_CPU_REGS, /* 16 bit regs. */
MT_CPR0_REGNUM,
MT_CPR1_REGNUM,
MT_CPR2_REGNUM,
MT_CPR3_REGNUM,
MT_CPR4_REGNUM,
MT_CPR5_REGNUM,
MT_CPR6_REGNUM,
MT_CPR7_REGNUM,
MT_CPR8_REGNUM,
MT_CPR9_REGNUM,
MT_CPR10_REGNUM,
MT_CPR11_REGNUM,
MT_CPR12_REGNUM,
MT_CPR13_REGNUM,
MT_CPR14_REGNUM,
MT_CPR15_REGNUM,
MT_BYPA_REGNUM, /* 32 bit regs. */
MT_BYPB_REGNUM,
MT_BYPC_REGNUM,
MT_FLAG_REGNUM,
MT_CONTEXT_REGNUM, /* 38 bits (treat as array of
six bytes). */
MT_MAC_REGNUM, /* 32 bits. */
MT_Z1_REGNUM, /* 16 bits. */
MT_Z2_REGNUM, /* 16 bits. */
MT_ICHANNEL_REGNUM, /* 32 bits. */
MT_ISCRAMB_REGNUM, /* 32 bits. */
MT_QSCRAMB_REGNUM, /* 32 bits. */
MT_OUT_REGNUM, /* 16 bits. */
MT_EXMAC_REGNUM, /* 32 bits (8 used). */
MT_QCHANNEL_REGNUM, /* 32 bits. */
MT_ZI2_REGNUM, /* 16 bits. */
MT_ZQ2_REGNUM, /* 16 bits. */
MT_CHANNEL2_REGNUM, /* 32 bits. */
MT_ISCRAMB2_REGNUM, /* 32 bits. */
MT_QSCRAMB2_REGNUM, /* 32 bits. */
MT_QCHANNEL2_REGNUM, /* 32 bits. */
/* Number of real registers. */
MT_NUM_REGS,
/* Pseudo-registers. */
MT_COPRO_PSEUDOREG_REGNUM = MT_NUM_REGS,
MT_MAC_PSEUDOREG_REGNUM,
MT_COPRO_PSEUDOREG_ARRAY,
MT_COPRO_PSEUDOREG_DIM_1 = 2,
MT_COPRO_PSEUDOREG_DIM_2 = 8,
/* The number of pseudo-registers for each coprocessor. These
include the real coprocessor registers, the pseudo-register for
the coprocessor number, and the pseudo-register for the MAC. */
MT_COPRO_PSEUDOREG_REGS = MT_NUM_REGS - MT_NUM_CPU_REGS + 2,
/* The register number of the MAC, relative to a given coprocessor. */
MT_COPRO_PSEUDOREG_MAC_REGNUM = MT_COPRO_PSEUDOREG_REGS - 1,
/* Two pseudo-regs ('coprocessor' and 'mac'). */
MT_NUM_PSEUDO_REGS = 2 + (MT_COPRO_PSEUDOREG_REGS
* MT_COPRO_PSEUDOREG_DIM_1
* MT_COPRO_PSEUDOREG_DIM_2)
};
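/*
 * Illustrative sketch (not part of the original file): how a register
 * number inside the coprocessor pseudo-register array decomposes into
 * the two bank coordinates and the per-bank register index.  This is
 * the same arithmetic mt_register_name() and mt_select_coprocessor()
 * use below; the function name is hypothetical.
 */
#if 0 /* example only */
static unsigned
example_decompose_copro_regnum (int regnum, unsigned *dim_1, unsigned *dim_2)
{
  regnum -= MT_COPRO_PSEUDOREG_ARRAY;
  *dim_2 = (regnum / MT_COPRO_PSEUDOREG_REGS) % MT_COPRO_PSEUDOREG_DIM_2;
  *dim_1 = ((regnum / MT_COPRO_PSEUDOREG_REGS / MT_COPRO_PSEUDOREG_DIM_2)
	    % MT_COPRO_PSEUDOREG_DIM_1);
  return regnum % MT_COPRO_PSEUDOREG_REGS;
}
#endif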
/* The tdep structure. */
struct gdbarch_tdep
{
/* ISA-specific types. */
struct type *copro_type;
};
/* Return name of register number specified by REGNUM. */
static const char *
mt_register_name (struct gdbarch *gdbarch, int regnum)
{
static const char *const register_names[] = {
/* CPU regs. */
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"pc", "IE",
/* Co-processor regs. */
"", /* copro register. */
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",
"cr8", "cr9", "cr10", "cr11", "cr12", "cr13", "cr14", "cr15",
"bypa", "bypb", "bypc", "flag", "context", "" /* mac. */ , "z1", "z2",
"Ichannel", "Iscramb", "Qscramb", "out", "" /* ex-mac. */ , "Qchannel",
"zi2", "zq2", "Ichannel2", "Iscramb2", "Qscramb2", "Qchannel2",
/* Pseudo-registers. */
"coprocessor", "MAC"
};
static const char *array_names[MT_COPRO_PSEUDOREG_REGS
* MT_COPRO_PSEUDOREG_DIM_1
* MT_COPRO_PSEUDOREG_DIM_2];
if (regnum < 0)
return "";
if (regnum < ARRAY_SIZE (register_names))
return register_names[regnum];
if (array_names[regnum - MT_COPRO_PSEUDOREG_ARRAY])
return array_names[regnum - MT_COPRO_PSEUDOREG_ARRAY];
{
char *name;
const char *stub;
unsigned dim_1;
unsigned dim_2;
unsigned index;
regnum -= MT_COPRO_PSEUDOREG_ARRAY;
index = regnum % MT_COPRO_PSEUDOREG_REGS;
dim_2 = (regnum / MT_COPRO_PSEUDOREG_REGS) % MT_COPRO_PSEUDOREG_DIM_2;
dim_1 = ((regnum / MT_COPRO_PSEUDOREG_REGS / MT_COPRO_PSEUDOREG_DIM_2)
% MT_COPRO_PSEUDOREG_DIM_1);
if (index == MT_COPRO_PSEUDOREG_MAC_REGNUM)
stub = register_names[MT_MAC_PSEUDOREG_REGNUM];
else if (index >= MT_NUM_REGS - MT_CPR0_REGNUM)
stub = "";
else
stub = register_names[index + MT_CPR0_REGNUM];
if (!*stub)
{
array_names[regnum] = stub;
return stub;
}
name = (char *) xmalloc (30);
sprintf (name, "copro_%d_%d_%s", dim_1, dim_2, stub);
array_names[regnum] = name;
return name;
}
}
/* Return the type of a coprocessor register. */
static struct type *
mt_copro_register_type (struct gdbarch *arch, int regnum)
{
switch (regnum)
{
case MT_INT_ENABLE_REGNUM:
case MT_ICHANNEL_REGNUM:
case MT_QCHANNEL_REGNUM:
case MT_ISCRAMB_REGNUM:
case MT_QSCRAMB_REGNUM:
return builtin_type (arch)->builtin_int32;
case MT_BYPA_REGNUM:
case MT_BYPB_REGNUM:
case MT_BYPC_REGNUM:
case MT_Z1_REGNUM:
case MT_Z2_REGNUM:
case MT_OUT_REGNUM:
case MT_ZI2_REGNUM:
case MT_ZQ2_REGNUM:
return builtin_type (arch)->builtin_int16;
case MT_EXMAC_REGNUM:
case MT_MAC_REGNUM:
return builtin_type (arch)->builtin_uint32;
case MT_CONTEXT_REGNUM:
return builtin_type (arch)->builtin_long_long;
case MT_FLAG_REGNUM:
return builtin_type (arch)->builtin_unsigned_char;
default:
if (regnum >= MT_CPR0_REGNUM && regnum <= MT_CPR15_REGNUM)
return builtin_type (arch)->builtin_int16;
else if (regnum == MT_CPR0_REGNUM + MT_COPRO_PSEUDOREG_MAC_REGNUM)
{
if (gdbarch_bfd_arch_info (arch)->mach == bfd_mach_mrisc2
|| gdbarch_bfd_arch_info (arch)->mach == bfd_mach_ms2)
return builtin_type (arch)->builtin_uint64;
else
return builtin_type (arch)->builtin_uint32;
}
else
return builtin_type (arch)->builtin_uint32;
}
}
/* Given ARCH and a register number specified by REGNUM, return the
type of that register. */
static struct type *
mt_register_type (struct gdbarch *arch, int regnum)
{
struct gdbarch_tdep *tdep = gdbarch_tdep (arch);
if (regnum >= 0 && regnum < MT_NUM_REGS + MT_NUM_PSEUDO_REGS)
{
switch (regnum)
{
case MT_PC_REGNUM:
case MT_RA_REGNUM:
case MT_IRA_REGNUM:
return builtin_type (arch)->builtin_func_ptr;
case MT_SP_REGNUM:
case MT_FP_REGNUM:
return builtin_type (arch)->builtin_data_ptr;
case MT_COPRO_REGNUM:
case MT_COPRO_PSEUDOREG_REGNUM:
if (tdep->copro_type == NULL)
{
struct type *elt = builtin_type (arch)->builtin_int16;
tdep->copro_type = lookup_array_range_type (elt, 0, 1);
}
return tdep->copro_type;
case MT_MAC_PSEUDOREG_REGNUM:
return mt_copro_register_type (arch,
MT_CPR0_REGNUM
+ MT_COPRO_PSEUDOREG_MAC_REGNUM);
default:
if (regnum >= MT_R0_REGNUM && regnum <= MT_R15_REGNUM)
return builtin_type (arch)->builtin_int32;
else if (regnum < MT_COPRO_PSEUDOREG_ARRAY)
return mt_copro_register_type (arch, regnum);
else
{
regnum -= MT_COPRO_PSEUDOREG_ARRAY;
regnum %= MT_COPRO_PSEUDOREG_REGS;
regnum += MT_CPR0_REGNUM;
return mt_copro_register_type (arch, regnum);
}
}
}
internal_error (__FILE__, __LINE__,
_("mt_register_type: illegal register number %d"), regnum);
}
/* Return true if register REGNUM is a member of the register group
specified by GROUP. */
static int
mt_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
struct reggroup *group)
{
/* Groups of registers that can be displayed via "info reg". */
if (group == all_reggroup)
return (regnum >= 0
&& regnum < MT_NUM_REGS + MT_NUM_PSEUDO_REGS
&& mt_register_name (gdbarch, regnum)[0] != '\0');
if (group == general_reggroup)
return (regnum >= MT_R0_REGNUM && regnum <= MT_R15_REGNUM);
if (group == float_reggroup)
return 0; /* No float regs. */
if (group == vector_reggroup)
return 0; /* No vector regs. */
/* For any that are not handled above. */
return default_register_reggroup_p (gdbarch, regnum, group);
}
/* Return the return value convention used for a given type TYPE.
Optionally, fetch or set the return value via READBUF or
WRITEBUF respectively using REGCACHE for the register
values. */
static enum return_value_convention
mt_return_value (struct gdbarch *gdbarch, struct value *function,
struct type *type, struct regcache *regcache,
gdb_byte *readbuf, const gdb_byte *writebuf)
{
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
if (TYPE_LENGTH (type) > 4)
{
/* Return values > 4 bytes are returned in memory,
pointed to by R11. */
if (readbuf)
{
ULONGEST addr;
regcache_cooked_read_unsigned (regcache, MT_R11_REGNUM, &addr);
read_memory (addr, readbuf, TYPE_LENGTH (type));
}
if (writebuf)
{
ULONGEST addr;
regcache_cooked_read_unsigned (regcache, MT_R11_REGNUM, &addr);
write_memory (addr, writebuf, TYPE_LENGTH (type));
}
return RETURN_VALUE_ABI_RETURNS_ADDRESS;
}
else
{
if (readbuf)
{
ULONGEST temp;
/* Return values of <= 4 bytes are returned in R11. */
regcache_cooked_read_unsigned (regcache, MT_R11_REGNUM, &temp);
store_unsigned_integer (readbuf, TYPE_LENGTH (type),
byte_order, temp);
}
if (writebuf)
{
if (TYPE_LENGTH (type) < 4)
{
gdb_byte buf[4];
/* Add leading zeros to the value. */
memset (buf, 0, sizeof (buf));
memcpy (buf + sizeof (buf) - TYPE_LENGTH (type),
writebuf, TYPE_LENGTH (type));
regcache_cooked_write (regcache, MT_R11_REGNUM, buf);
}
else /* TYPE_LENGTH (type) == 4 */
regcache_cooked_write (regcache, MT_R11_REGNUM, writebuf);
}
return RETURN_VALUE_REGISTER_CONVENTION;
}
}
/* If the input address, PC, is in a function prologue, return the
address of the end of the prologue, otherwise return the input
address.
Note: PC is likely to be the function start, since this function
is mainly used for advancing a breakpoint to the first line, or
stepping to the first line when we have stepped into a function
call. */
static CORE_ADDR
mt_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
CORE_ADDR func_addr = 0, func_end = 0;
const char *func_name;
unsigned long instr;
if (find_pc_partial_function (pc, &func_name, &func_addr, &func_end))
{
struct symtab_and_line sal;
struct symbol *sym;
/* Found a function. */
sym = lookup_symbol (func_name, NULL, VAR_DOMAIN, NULL).symbol;
if (sym && SYMBOL_LANGUAGE (sym) != language_asm)
{
/* Don't use this trick for assembly source files. */
sal = find_pc_line (func_addr, 0);
if (sal.end && sal.end < func_end)
{
/* Found a line number, use it as end of prologue. */
return sal.end;
}
}
}
/* No function symbol, or no line symbol. Use prologue scanning method. */
for (;; pc += 4)
{
instr = read_memory_unsigned_integer (pc, 4, byte_order);
if (instr == 0x12000000) /* nop */
continue;
if (instr == 0x12ddc000) /* copy sp into fp */
continue;
instr >>= 16;
if (instr == 0x05dd) /* subi sp, sp, imm */
continue;
if (instr >= 0x43c0 && instr <= 0x43df) /* push */
continue;
/* Not an obvious prologue instruction. */
break;
}
return pc;
}
/* The breakpoint instruction must be the same size as the smallest
instruction in the instruction set.
The BP for ms1 is defined as 0x68000000 (BREAK).
The BP for ms2 is defined as 0x69000000 (illegal). */
static const gdb_byte *
mt_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *bp_addr,
int *bp_size)
{
static gdb_byte ms1_breakpoint[] = { 0x68, 0, 0, 0 };
static gdb_byte ms2_breakpoint[] = { 0x69, 0, 0, 0 };
*bp_size = 4;
if (gdbarch_bfd_arch_info (gdbarch)->mach == bfd_mach_ms2)
return ms2_breakpoint;
return ms1_breakpoint;
}
/* Select the correct coprocessor register bank. Return the pseudo
regnum we really want to read. */
static int
mt_select_coprocessor (struct gdbarch *gdbarch,
struct regcache *regcache, int regno)
{
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
unsigned index, base;
gdb_byte copro[4];
/* Get the copro pseudo regnum. */
regcache_raw_read (regcache, MT_COPRO_REGNUM, copro);
base = ((extract_signed_integer (&copro[0], 2, byte_order)
* MT_COPRO_PSEUDOREG_DIM_2)
+ extract_signed_integer (&copro[2], 2, byte_order));
regno -= MT_COPRO_PSEUDOREG_ARRAY;
index = regno % MT_COPRO_PSEUDOREG_REGS;
regno /= MT_COPRO_PSEUDOREG_REGS;
if (base != regno)
{
/* Select the correct coprocessor register bank. Invalidate the
coprocessor register cache. */
unsigned ix;
store_signed_integer (&copro[0], 2, byte_order,
regno / MT_COPRO_PSEUDOREG_DIM_2);
store_signed_integer (&copro[2], 2, byte_order,
regno % MT_COPRO_PSEUDOREG_DIM_2);
regcache_raw_write (regcache, MT_COPRO_REGNUM, copro);
/* We must flush the cache, as it is now invalid. */
for (ix = MT_NUM_CPU_REGS; ix != MT_NUM_REGS; ix++)
regcache_invalidate (regcache, ix);
}
return index;
}
/* Fetch the pseudo registers:
There are two regular pseudo-registers:
1) The 'coprocessor' pseudo-register (which mirrors the
"real" coprocessor register sent by the target), and
2) The 'MAC' pseudo-register (which represents the union
of the original 32 bit target MAC register and the new
8-bit extended-MAC register).
Additionally there is an array of coprocessor registers which track
the coprocessor registers for each coprocessor. */
static enum register_status
mt_pseudo_register_read (struct gdbarch *gdbarch,
struct regcache *regcache, int regno, gdb_byte *buf)
{
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
switch (regno)
{
case MT_COPRO_REGNUM:
case MT_COPRO_PSEUDOREG_REGNUM:
return regcache_raw_read (regcache, MT_COPRO_REGNUM, buf);
case MT_MAC_REGNUM:
case MT_MAC_PSEUDOREG_REGNUM:
if (gdbarch_bfd_arch_info (gdbarch)->mach == bfd_mach_mrisc2
|| gdbarch_bfd_arch_info (gdbarch)->mach == bfd_mach_ms2)
{
enum register_status status;
ULONGEST oldmac = 0, ext_mac = 0;
ULONGEST newmac;
status = regcache_cooked_read_unsigned (regcache, MT_MAC_REGNUM, &oldmac);
if (status != REG_VALID)
return status;
	  status = regcache_cooked_read_unsigned (regcache, MT_EXMAC_REGNUM, &ext_mac);
if (status != REG_VALID)
return status;
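	  /* The effective MAC value is 40 bits wide: the low 32 bits come
	     from MT_MAC_REGNUM and the high 8 bits from MT_EXMAC_REGNUM.
	     Illustrative values: oldmac 0x89abcdef with ext_mac 0x01
	     combine to newmac 0x0189abcdef.  */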
newmac =
(oldmac & 0xffffffff) | ((long long) (ext_mac & 0xff) << 32);
store_signed_integer (buf, 8, byte_order, newmac);
return REG_VALID;
}
else
return regcache_raw_read (regcache, MT_MAC_REGNUM, buf);
break;
default:
{
unsigned index = mt_select_coprocessor (gdbarch, regcache, regno);
if (index == MT_COPRO_PSEUDOREG_MAC_REGNUM)
return mt_pseudo_register_read (gdbarch, regcache,
MT_MAC_PSEUDOREG_REGNUM, buf);
else if (index < MT_NUM_REGS - MT_CPR0_REGNUM)
return regcache_raw_read (regcache, index + MT_CPR0_REGNUM, buf);
else
/* ??? */
return REG_VALID;
}
break;
}
}
/* Write the pseudo registers:
Mt pseudo-registers are stored directly to the target. The
'coprocessor' register is special, because when it is modified, all
the other coprocessor regs must be flushed from the reg cache. */
static void
mt_pseudo_register_write (struct gdbarch *gdbarch,
struct regcache *regcache,
int regno, const gdb_byte *buf)
{
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
int i;
switch (regno)
{
case MT_COPRO_REGNUM:
case MT_COPRO_PSEUDOREG_REGNUM:
regcache_raw_write (regcache, MT_COPRO_REGNUM, buf);
for (i = MT_NUM_CPU_REGS; i < MT_NUM_REGS; i++)
regcache_invalidate (regcache, i);
break;
case MT_MAC_REGNUM:
case MT_MAC_PSEUDOREG_REGNUM:
if (gdbarch_bfd_arch_info (gdbarch)->mach == bfd_mach_mrisc2
|| gdbarch_bfd_arch_info (gdbarch)->mach == bfd_mach_ms2)
{
	  /* The 8-byte MAC pseudo-register must be broken down into the
	     32-bit MAC register and the 8-bit extended-MAC register.  */
unsigned int oldmac, ext_mac;
ULONGEST newmac;
newmac = extract_unsigned_integer (buf, 8, byte_order);
oldmac = newmac & 0xffffffff;
ext_mac = (newmac >> 32) & 0xff;
regcache_cooked_write_unsigned (regcache, MT_MAC_REGNUM, oldmac);
regcache_cooked_write_unsigned (regcache, MT_EXMAC_REGNUM, ext_mac);
}
else
regcache_raw_write (regcache, MT_MAC_REGNUM, buf);
break;
default:
{
unsigned index = mt_select_coprocessor (gdbarch, regcache, regno);
if (index == MT_COPRO_PSEUDOREG_MAC_REGNUM)
mt_pseudo_register_write (gdbarch, regcache,
MT_MAC_PSEUDOREG_REGNUM, buf);
else if (index < MT_NUM_REGS - MT_CPR0_REGNUM)
regcache_raw_write (regcache, index + MT_CPR0_REGNUM, buf);
}
break;
}
}
static CORE_ADDR
mt_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
/* Register size is 4 bytes. */
return align_down (sp, 4);
}
/* Implements the "info registers" command. When ``all'' is non-zero,
the coprocessor registers will be printed in addition to the rest
of the registers. */
static void
mt_registers_info (struct gdbarch *gdbarch,
struct ui_file *file,
struct frame_info *frame, int regnum, int all)
{
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
if (regnum == -1)
{
int lim;
lim = all ? MT_NUM_REGS : MT_NUM_CPU_REGS;
for (regnum = 0; regnum < lim; regnum++)
{
/* Don't display the Qchannel register since it will be displayed
along with Ichannel. (See below.) */
if (regnum == MT_QCHANNEL_REGNUM)
continue;
mt_registers_info (gdbarch, file, frame, regnum, all);
/* Display the Qchannel register immediately after Ichannel. */
if (regnum == MT_ICHANNEL_REGNUM)
mt_registers_info (gdbarch, file, frame, MT_QCHANNEL_REGNUM, all);
}
}
else
{
if (regnum == MT_EXMAC_REGNUM)
return;
else if (regnum == MT_CONTEXT_REGNUM)
{
/* Special output handling for 38-bit context register. */
unsigned char *buff;
unsigned int *bytes, i, regsize;
regsize = register_size (gdbarch, regnum);
buff = (unsigned char *) alloca (regsize);
bytes = XALLOCAVEC (unsigned int, regsize);
deprecated_frame_register_read (frame, regnum, buff);
fputs_filtered (gdbarch_register_name
(gdbarch, regnum), file);
print_spaces_filtered (15 - strlen (gdbarch_register_name
(gdbarch, regnum)),
file);
fputs_filtered ("0x", file);
for (i = 0; i < regsize; i++)
fprintf_filtered (file, "%02x", (unsigned int)
extract_unsigned_integer (buff + i, 1, byte_order));
fputs_filtered ("\t", file);
print_longest (file, 'd', 0,
extract_unsigned_integer (buff, regsize, byte_order));
fputs_filtered ("\n", file);
}
else if (regnum == MT_COPRO_REGNUM
|| regnum == MT_COPRO_PSEUDOREG_REGNUM)
{
/* Special output handling for the 'coprocessor' register. */
gdb_byte *buf;
struct value_print_options opts;
buf = (gdb_byte *) alloca (register_size (gdbarch, MT_COPRO_REGNUM));
deprecated_frame_register_read (frame, MT_COPRO_REGNUM, buf);
/* And print. */
regnum = MT_COPRO_PSEUDOREG_REGNUM;
fputs_filtered (gdbarch_register_name (gdbarch, regnum),
file);
print_spaces_filtered (15 - strlen (gdbarch_register_name
(gdbarch, regnum)),
file);
get_no_prettyformat_print_options (&opts);
opts.deref_ref = 1;
val_print (register_type (gdbarch, regnum), buf,
0, 0, file, 0, NULL,
&opts, current_language);
fputs_filtered ("\n", file);
}
else if (regnum == MT_MAC_REGNUM || regnum == MT_MAC_PSEUDOREG_REGNUM)
{
ULONGEST oldmac, ext_mac, newmac;
gdb_byte buf[3 * sizeof (LONGEST)];
/* Get the two "real" mac registers. */
deprecated_frame_register_read (frame, MT_MAC_REGNUM, buf);
oldmac = extract_unsigned_integer
(buf, register_size (gdbarch, MT_MAC_REGNUM), byte_order);
if (gdbarch_bfd_arch_info (gdbarch)->mach == bfd_mach_mrisc2
|| gdbarch_bfd_arch_info (gdbarch)->mach == bfd_mach_ms2)
{
deprecated_frame_register_read (frame, MT_EXMAC_REGNUM, buf);
ext_mac = extract_unsigned_integer
(buf, register_size (gdbarch, MT_EXMAC_REGNUM), byte_order);
}
else
ext_mac = 0;
/* Add them together. */
newmac = (oldmac & 0xffffffff) + ((ext_mac & 0xff) << 32);
/* And print. */
regnum = MT_MAC_PSEUDOREG_REGNUM;
fputs_filtered (gdbarch_register_name (gdbarch, regnum),
file);
print_spaces_filtered (15 - strlen (gdbarch_register_name
(gdbarch, regnum)),
file);
fputs_filtered ("0x", file);
print_longest (file, 'x', 0, newmac);
fputs_filtered ("\t", file);
print_longest (file, 'u', 0, newmac);
fputs_filtered ("\n", file);
}
else
default_print_registers_info (gdbarch, file, frame, regnum, all);
}
}
/* Set up the callee's arguments for an inferior function call. The
arguments are pushed on the stack or are placed in registers as
appropriate. It also sets up the return address (which points to
the call dummy breakpoint).
Returns the updated (and aligned) stack pointer. */
static CORE_ADDR
mt_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
struct regcache *regcache, CORE_ADDR bp_addr,
int nargs, struct value **args, CORE_ADDR sp,
int struct_return, CORE_ADDR struct_addr)
{
#define wordsize 4
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
gdb_byte buf[MT_MAX_STRUCT_SIZE];
int argreg = MT_1ST_ARGREG;
int split_param_len = 0;
int stack_dest = sp;
int slacklen;
int typelen;
int i, j;
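  /* Summary of the convention implemented below, as reconstructed from
     this function (not an authoritative ABI description):
       - scalar args of 1-4 bytes each occupy one argument register;
       - 8/12/16-byte args are split word by word across the remaining
         argument registers, any leftover words being deferred to the
         stack;
       - larger aggregates are copied to the stack and their address is
         passed in the argument register;
       - once the registers are exhausted, remaining args go onto the
         stack in reverse order, right-justified in word-sized slots.  */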
/* First handle however many args we can fit into MT_1ST_ARGREG thru
MT_LAST_ARGREG. */
for (i = 0; i < nargs && argreg <= MT_LAST_ARGREG; i++)
{
const gdb_byte *val;
typelen = TYPE_LENGTH (value_type (args[i]));
switch (typelen)
{
case 1:
case 2:
case 3:
case 4:
regcache_cooked_write_unsigned (regcache, argreg++,
extract_unsigned_integer
(value_contents (args[i]),
wordsize, byte_order));
break;
case 8:
case 12:
case 16:
val = value_contents (args[i]);
while (typelen > 0)
{
if (argreg <= MT_LAST_ARGREG)
{
/* This word of the argument is passed in a register. */
regcache_cooked_write_unsigned (regcache, argreg++,
extract_unsigned_integer
(val, wordsize, byte_order));
typelen -= wordsize;
val += wordsize;
}
else
{
/* Remainder of this arg must be passed on the stack
(deferred to do later). */
split_param_len = typelen;
memcpy (buf, val, typelen);
break; /* No more args can be handled in regs. */
}
}
break;
default:
/* By reverse engineering of gcc output, args bigger than
16 bytes go on the stack, and their address is passed
in the argreg. */
stack_dest -= typelen;
write_memory (stack_dest, value_contents (args[i]), typelen);
regcache_cooked_write_unsigned (regcache, argreg++, stack_dest);
break;
}
}
/* Next, the rest of the arguments go onto the stack, in reverse order. */
for (j = nargs - 1; j >= i; j--)
{
gdb_byte *val;
struct cleanup *back_to;
const gdb_byte *contents = value_contents (args[j]);
/* Right-justify the value in an aligned-length buffer. */
typelen = TYPE_LENGTH (value_type (args[j]));
slacklen = (wordsize - (typelen % wordsize)) % wordsize;
val = (gdb_byte *) xmalloc (typelen + slacklen);
back_to = make_cleanup (xfree, val);
memcpy (val, contents, typelen);
memset (val + typelen, 0, slacklen);
/* Now write this data to the stack. */
stack_dest -= typelen + slacklen;
write_memory (stack_dest, val, typelen + slacklen);
do_cleanups (back_to);
}
/* Finally, if a param needs to be split between registers and stack,
write the second half to the stack now. */
if (split_param_len != 0)
{
stack_dest -= split_param_len;
write_memory (stack_dest, buf, split_param_len);
}
/* Set up return address (provided to us as bp_addr). */
regcache_cooked_write_unsigned (regcache, MT_RA_REGNUM, bp_addr);
/* Store struct return address, if given. */
if (struct_return && struct_addr != 0)
regcache_cooked_write_unsigned (regcache, MT_R11_REGNUM, struct_addr);
/* Set aside 16 bytes for the callee to save regs 1-4. */
stack_dest -= 16;
/* Update the stack pointer. */
regcache_cooked_write_unsigned (regcache, MT_SP_REGNUM, stack_dest);
/* And that should do it. Return the new stack pointer. */
return stack_dest;
}
/* The 'unwind_cache' data structure. */
struct mt_unwind_cache
{
/* The previous frame's inner most stack address.
Used as this frame ID's stack_addr. */
CORE_ADDR prev_sp;
CORE_ADDR frame_base;
int framesize;
int frameless_p;
/* Table indicating the location of each and every register. */
struct trad_frame_saved_reg *saved_regs;
};
/* Initialize an unwind_cache. Build up the saved_regs table etc. for
the frame. */
static struct mt_unwind_cache *
mt_frame_unwind_cache (struct frame_info *this_frame,
void **this_prologue_cache)
{
struct gdbarch *gdbarch;
struct mt_unwind_cache *info;
CORE_ADDR next_addr, start_addr, end_addr, prologue_end_addr;
unsigned long instr, upper_half, delayed_store = 0;
int regnum, offset;
ULONGEST sp, fp;
if ((*this_prologue_cache))
return (struct mt_unwind_cache *) (*this_prologue_cache);
gdbarch = get_frame_arch (this_frame);
info = FRAME_OBSTACK_ZALLOC (struct mt_unwind_cache);
(*this_prologue_cache) = info;
info->prev_sp = 0;
info->framesize = 0;
info->frame_base = 0;
info->frameless_p = 1;
info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
/* Grab the frame-relative values of SP and FP, needed below.
The frame_saved_register function will find them on the
stack or in the registers as appropriate. */
sp = get_frame_register_unsigned (this_frame, MT_SP_REGNUM);
fp = get_frame_register_unsigned (this_frame, MT_FP_REGNUM);
start_addr = get_frame_func (this_frame);
/* Return early if GDB couldn't find the function. */
if (start_addr == 0)
return info;
end_addr = get_frame_pc (this_frame);
prologue_end_addr = skip_prologue_using_sal (gdbarch, start_addr);
  if (end_addr != 0)
    for (next_addr = start_addr; next_addr < end_addr; next_addr += 4)
{
instr = get_frame_memory_unsigned (this_frame, next_addr, 4);
if (delayed_store) /* Previous instr was a push. */
{
upper_half = delayed_store >> 16;
regnum = upper_half & 0xf;
offset = delayed_store & 0xffff;
switch (upper_half & 0xfff0)
{
case 0x43c0: /* push using frame pointer. */
info->saved_regs[regnum].addr = offset;
break;
case 0x43d0: /* push using stack pointer. */
info->saved_regs[regnum].addr = offset;
break;
default: /* lint */
break;
}
delayed_store = 0;
}
switch (instr)
{
case 0x12000000: /* NO-OP */
continue;
case 0x12ddc000: /* copy sp into fp */
info->frameless_p = 0; /* Record that the frame
pointer is in use. */
continue;
default:
upper_half = instr >> 16;
if (upper_half == 0x05dd || /* subi sp, sp, imm */
upper_half == 0x07dd) /* subui sp, sp, imm */
{
/* Record the frame size. */
info->framesize = instr & 0xffff;
continue;
}
if ((upper_half & 0xfff0) == 0x43c0 || /* frame push */
(upper_half & 0xfff0) == 0x43d0) /* stack push */
{
/* Save this instruction, but don't record the
pushed register as 'saved' until we see the
next instruction. That's because of deferred stores
on this target -- GDB won't be able to read the register
from the stack until one instruction later. */
delayed_store = instr;
continue;
}
/* Not a prologue instruction. Is this the end of the prologue?
This is the most difficult decision; when to stop scanning.
If we have no line symbol, then the best thing we can do
is to stop scanning when we encounter an instruction that
is not likely to be a part of the prologue.
But if we do have a line symbol, then we should
keep scanning until we reach it (or we reach end_addr). */
if (prologue_end_addr && (prologue_end_addr > (next_addr + 4)))
continue; /* Keep scanning, recording saved_regs etc. */
else
break; /* Quit scanning: breakpoint can be set here. */
}
}
/* Special handling for the "saved" address of the SP:
The SP is of course never saved on the stack at all, so
by convention what we put here is simply the previous
_value_ of the SP (as opposed to an address where the
previous value would have been pushed). This will also
give us the frame base address. */
if (info->frameless_p)
{
info->frame_base = sp + info->framesize;
info->prev_sp = sp + info->framesize;
}
else
{
info->frame_base = fp + info->framesize;
info->prev_sp = fp + info->framesize;
}
/* Save prev_sp in saved_regs as a value, not as an address. */
trad_frame_set_value (info->saved_regs, MT_SP_REGNUM, info->prev_sp);
/* Now convert frame offsets to actual addresses (not offsets). */
for (regnum = 0; regnum < MT_NUM_REGS; regnum++)
if (trad_frame_addr_p (info->saved_regs, regnum))
info->saved_regs[regnum].addr += info->frame_base - info->framesize;
  /* The call instruction moves the caller's PC into the callee's RA reg.
Since this is an unwind, do the reverse. Copy the location of RA
into PC (the address / regnum) so that a request for PC will be
converted into a request for the RA. */
info->saved_regs[MT_PC_REGNUM] = info->saved_regs[MT_RA_REGNUM];
return info;
}
static CORE_ADDR
mt_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
ULONGEST pc;
pc = frame_unwind_register_unsigned (next_frame, MT_PC_REGNUM);
return pc;
}
static CORE_ADDR
mt_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
ULONGEST sp;
sp = frame_unwind_register_unsigned (next_frame, MT_SP_REGNUM);
return sp;
}
/* Assuming THIS_FRAME is a dummy, return the frame ID of that dummy
frame. The frame ID's base needs to match the TOS value saved by
save_dummy_frame_tos(), and the PC match the dummy frame's breakpoint. */
static struct frame_id
mt_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
CORE_ADDR sp = get_frame_register_unsigned (this_frame, MT_SP_REGNUM);
return frame_id_build (sp, get_frame_pc (this_frame));
}
/* Given a GDB frame, determine the address of the calling function's
frame. This will be used to create a new GDB frame struct. */
static void
mt_frame_this_id (struct frame_info *this_frame,
void **this_prologue_cache, struct frame_id *this_id)
{
struct mt_unwind_cache *info =
mt_frame_unwind_cache (this_frame, this_prologue_cache);
if (!(info == NULL || info->prev_sp == 0))
(*this_id) = frame_id_build (info->prev_sp, get_frame_func (this_frame));
return;
}
static struct value *
mt_frame_prev_register (struct frame_info *this_frame,
void **this_prologue_cache, int regnum)
{
struct mt_unwind_cache *info =
mt_frame_unwind_cache (this_frame, this_prologue_cache);
return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
}
static CORE_ADDR
mt_frame_base_address (struct frame_info *this_frame,
void **this_prologue_cache)
{
struct mt_unwind_cache *info =
mt_frame_unwind_cache (this_frame, this_prologue_cache);
return info->frame_base;
}
/* This is a shared interface: the 'frame_unwind' object is what's
returned by the 'sniffer' function, and in turn specifies how to
get a frame's ID and prev_regs.
This exports the 'prev_register' and 'this_id' methods. */
static const struct frame_unwind mt_frame_unwind = {
NORMAL_FRAME,
default_frame_unwind_stop_reason,
mt_frame_this_id,
mt_frame_prev_register,
NULL,
default_frame_sniffer
};
/* Another shared interface: the 'frame_base' object specifies how to
unwind a frame and secure the base addresses for frame objects
(locals, args). */
static struct frame_base mt_frame_base = {
&mt_frame_unwind,
mt_frame_base_address,
mt_frame_base_address,
mt_frame_base_address
};
static struct gdbarch *
mt_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
struct gdbarch *gdbarch;
struct gdbarch_tdep *tdep;
/* Find a candidate among the list of pre-declared architectures. */
arches = gdbarch_list_lookup_by_info (arches, &info);
if (arches != NULL)
return arches->gdbarch;
/* None found, create a new architecture from the information
provided. */
tdep = XCNEW (struct gdbarch_tdep);
gdbarch = gdbarch_alloc (&info, tdep);
set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
set_gdbarch_register_name (gdbarch, mt_register_name);
set_gdbarch_num_regs (gdbarch, MT_NUM_REGS);
set_gdbarch_num_pseudo_regs (gdbarch, MT_NUM_PSEUDO_REGS);
set_gdbarch_pc_regnum (gdbarch, MT_PC_REGNUM);
set_gdbarch_sp_regnum (gdbarch, MT_SP_REGNUM);
set_gdbarch_pseudo_register_read (gdbarch, mt_pseudo_register_read);
set_gdbarch_pseudo_register_write (gdbarch, mt_pseudo_register_write);
set_gdbarch_skip_prologue (gdbarch, mt_skip_prologue);
set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
set_gdbarch_breakpoint_from_pc (gdbarch, mt_breakpoint_from_pc);
set_gdbarch_decr_pc_after_break (gdbarch, 0);
set_gdbarch_frame_args_skip (gdbarch, 0);
set_gdbarch_print_insn (gdbarch, print_insn_mt);
set_gdbarch_register_type (gdbarch, mt_register_type);
set_gdbarch_register_reggroup_p (gdbarch, mt_register_reggroup_p);
set_gdbarch_return_value (gdbarch, mt_return_value);
set_gdbarch_frame_align (gdbarch, mt_frame_align);
set_gdbarch_print_registers_info (gdbarch, mt_registers_info);
set_gdbarch_push_dummy_call (gdbarch, mt_push_dummy_call);
/* Target builtin data types. */
set_gdbarch_short_bit (gdbarch, 16);
set_gdbarch_int_bit (gdbarch, 32);
set_gdbarch_long_bit (gdbarch, 32);
set_gdbarch_long_long_bit (gdbarch, 64);
set_gdbarch_float_bit (gdbarch, 32);
set_gdbarch_double_bit (gdbarch, 64);
set_gdbarch_long_double_bit (gdbarch, 64);
set_gdbarch_ptr_bit (gdbarch, 32);
/* Register the DWARF 2 sniffer first, and then the traditional prologue
based sniffer. */
dwarf2_append_unwinders (gdbarch);
frame_unwind_append_unwinder (gdbarch, &mt_frame_unwind);
frame_base_set_default (gdbarch, &mt_frame_base);
/* Register the 'unwind_pc' method. */
set_gdbarch_unwind_pc (gdbarch, mt_unwind_pc);
set_gdbarch_unwind_sp (gdbarch, mt_unwind_sp);
/* Methods for saving / extracting a dummy frame's ID.
The ID's stack address must match the SP value returned by
PUSH_DUMMY_CALL, and saved by generic_save_dummy_frame_tos. */
set_gdbarch_dummy_id (gdbarch, mt_dummy_id);
return gdbarch;
}
/* Provide a prototype to silence -Wmissing-prototypes. */
extern initialize_file_ftype _initialize_mt_tdep;
void
_initialize_mt_tdep (void)
{
register_gdbarch_init (bfd_arch_mt, mt_gdbarch_init);
}
| gpl-2.0 |
heros/multi_realm_cell | dep_tc/acelite/ace/SOCK_Dgram.cpp | 266 | 19799 | // $Id: SOCK_Dgram.cpp 95533 2012-02-14 22:59:17Z wotte $
#include "ace/SOCK_Dgram.h"
#include "ace/Log_Msg.h"
#include "ace/INET_Addr.h"
#include "ace/ACE.h"
#include "ace/OS_NS_string.h"
#include "ace/OS_Memory.h"
#include "ace/OS_NS_ctype.h"
#include "ace/os_include/net/os_if.h"
#include "ace/Truncate.h"
#if !defined (__ACE_INLINE__)
# include "ace/SOCK_Dgram.inl"
#endif /* __ACE_INLINE__ */
#if defined (ACE_HAS_IPV6) && defined (ACE_WIN32)
#include /**/ <iphlpapi.h>
#endif
// This is a workaround for platforms with non-standard
// definitions of the ip_mreq structure
#if ! defined (IMR_MULTIADDR)
#define IMR_MULTIADDR imr_multiaddr
#endif /* ! defined (IMR_MULTIADDR) */
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
ACE_ALLOC_HOOK_DEFINE (ACE_SOCK_Dgram)
void
ACE_SOCK_Dgram::dump (void) const
{
#if defined (ACE_HAS_DUMP)
ACE_TRACE ("ACE_SOCK_Dgram::dump");
#endif /* ACE_HAS_DUMP */
}
// Allows a client to read from a socket without having to provide a
// buffer to read. This method determines how much data is in the
// socket, allocates a buffer of this size, reads in the data, and
// returns the number of bytes read.
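// Note that on success the buffer handed back in io_vec->iov_base is
// allocated here with ACE_NEW_RETURN; the caller owns it and should
// release it (delete [] on the char array) once the data is consumed.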
ssize_t
ACE_SOCK_Dgram::recv (iovec *io_vec,
ACE_Addr &addr,
int flags,
const ACE_Time_Value *timeout) const
{
ACE_TRACE ("ACE_SOCK_Dgram::recv");
#if defined (FIONREAD)
if( ACE::handle_read_ready (this->get_handle (), timeout) != 1 )
{
return -1;
}
sockaddr *saddr = (sockaddr *) addr.get_addr ();
int addr_len = addr.get_size ();
int inlen;
if (ACE_OS::ioctl (this->get_handle (),
FIONREAD,
&inlen) == -1)
return -1;
else if (inlen > 0)
{
ACE_NEW_RETURN (io_vec->iov_base,
char[inlen],
-1);
ssize_t rcv_len = ACE_OS::recvfrom (this->get_handle (),
(char *) io_vec->iov_base,
inlen,
flags,
(sockaddr *) saddr,
&addr_len);
if (rcv_len < 0)
{
delete [] (char *)io_vec->iov_base;
io_vec->iov_base = 0;
}
else
{
io_vec->iov_len = ACE_Utils::truncate_cast<u_long> (rcv_len);
addr.set_size (addr_len);
}
return rcv_len;
}
else
return 0;
#else
ACE_UNUSED_ARG (flags);
ACE_UNUSED_ARG (addr);
ACE_UNUSED_ARG (io_vec);
ACE_UNUSED_ARG (timeout);
ACE_NOTSUP_RETURN (-1);
#endif /* FIONREAD */
}
// Here's the shared open function.  Note that if we are using the
// PF_INET protocol family and the address of LOCAL == the address of
// the special variable SAP_ANY, then we arbitrarily bind to a port
// number.
int
ACE_SOCK_Dgram::shared_open (const ACE_Addr &local,
int protocol_family)
{
ACE_TRACE ("ACE_SOCK_Dgram::shared_open");
bool error = false;
if (local == ACE_Addr::sap_any)
{
if (protocol_family == PF_INET
#if defined (ACE_HAS_IPV6)
|| protocol_family == PF_INET6
#endif /* ACE_HAS_IPV6 */
)
{
if (ACE::bind_port (this->get_handle (),
INADDR_ANY,
protocol_family) == -1)
error = true;
}
}
else if (ACE_OS::bind (this->get_handle (),
reinterpret_cast<sockaddr *> (local.get_addr ()),
local.get_size ()) == -1)
error = true;
if (error)
this->close ();
return error ? -1 : 0;
}
int
ACE_SOCK_Dgram::open (const ACE_Addr &local,
int protocol_family,
int protocol,
ACE_Protocol_Info *protocolinfo,
ACE_SOCK_GROUP g,
u_long flags,
int reuse_addr)
{
if (ACE_SOCK::open (SOCK_DGRAM,
protocol_family,
protocol,
protocolinfo,
g,
flags,
reuse_addr) == -1)
return -1;
else if (this->shared_open (local,
protocol_family) == -1)
return -1;
else
return 0;
}
// Here's the general-purpose open routine.
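// A usage sketch (assuming the usual ACE_INET_Addr constructors and the
// default arguments declared in SOCK_Dgram.h):
//
//   ACE_SOCK_Dgram peer;
//   if (peer.open (ACE_INET_Addr (12345u)) == -1)
//     ACE_ERROR ((LM_ERROR, ACE_TEXT ("%p\n"), ACE_TEXT ("open")));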
int
ACE_SOCK_Dgram::open (const ACE_Addr &local,
int protocol_family,
int protocol,
int reuse_addr)
{
ACE_TRACE ("ACE_SOCK_Dgram::open");
if (local != ACE_Addr::sap_any)
protocol_family = local.get_type ();
else if (protocol_family == PF_UNSPEC)
{
#if defined (ACE_HAS_IPV6)
protocol_family = ACE::ipv6_enabled () ? PF_INET6 : PF_INET;
#else
protocol_family = PF_INET;
#endif /* ACE_HAS_IPV6 */
}
if (ACE_SOCK::open (SOCK_DGRAM,
protocol_family,
protocol,
reuse_addr) == -1)
return -1;
else
return this->shared_open (local,
protocol_family);
}
// Here's the general-purpose constructor used by a connectionless
// datagram ``server''...
ACE_SOCK_Dgram::ACE_SOCK_Dgram (const ACE_Addr &local,
int protocol_family,
int protocol,
int reuse_addr)
{
ACE_TRACE ("ACE_SOCK_Dgram::ACE_SOCK_Dgram");
if (this->open (local,
protocol_family,
protocol,
reuse_addr) == -1)
ACE_ERROR ((LM_ERROR,
ACE_TEXT ("%p\n"),
ACE_TEXT ("ACE_SOCK_Dgram")));
}
ACE_SOCK_Dgram::ACE_SOCK_Dgram (const ACE_Addr &local,
int protocol_family,
int protocol,
ACE_Protocol_Info *protocolinfo,
ACE_SOCK_GROUP g,
u_long flags,
int reuse_addr)
{
ACE_TRACE ("ACE_SOCK_Dgram::ACE_SOCK_Dgram");
if (this->open (local,
protocol_family,
protocol,
protocolinfo,
g,
flags,
reuse_addr) == -1)
ACE_ERROR ((LM_ERROR,
ACE_TEXT ("%p\n"),
ACE_TEXT ("ACE_SOCK_Dgram")));
}
#if defined (ACE_HAS_MSG)
// Send an iovec of size N to ADDR as a datagram (connectionless
// version).
ssize_t
ACE_SOCK_Dgram::send (const iovec iov[],
int n,
const ACE_Addr &addr,
int flags) const
{
ACE_TRACE ("ACE_SOCK_Dgram::send");
msghdr send_msg;
send_msg.msg_iov = (iovec *) iov;
send_msg.msg_iovlen = n;
#if defined (ACE_HAS_SOCKADDR_MSG_NAME)
send_msg.msg_name = (struct sockaddr *) addr.get_addr ();
#else
send_msg.msg_name = (char *) addr.get_addr ();
#endif /* ACE_HAS_SOCKADDR_MSG_NAME */
send_msg.msg_namelen = addr.get_size ();
#if defined (ACE_HAS_4_4BSD_SENDMSG_RECVMSG)
send_msg.msg_control = 0;
send_msg.msg_controllen = 0;
send_msg.msg_flags = 0;
#else
send_msg.msg_accrights = 0;
send_msg.msg_accrightslen = 0;
#endif /* ACE_HAS_4_4BSD_SENDMSG_RECVMSG */
return ACE_OS::sendmsg (this->get_handle (),
&send_msg,
flags);
}
// Recv an iovec of size N to ADDR as a datagram (connectionless
// version).
ssize_t
ACE_SOCK_Dgram::recv (iovec iov[],
int n,
ACE_Addr &addr,
int flags) const
{
ACE_TRACE ("ACE_SOCK_Dgram::recv");
msghdr recv_msg;
recv_msg.msg_iov = (iovec *) iov;
recv_msg.msg_iovlen = n;
#if defined (ACE_HAS_SOCKADDR_MSG_NAME)
recv_msg.msg_name = (struct sockaddr *) addr.get_addr ();
#else
recv_msg.msg_name = (char *) addr.get_addr ();
#endif /* ACE_HAS_SOCKADDR_MSG_NAME */
recv_msg.msg_namelen = addr.get_size ();
#if defined (ACE_HAS_4_4BSD_SENDMSG_RECVMSG)
recv_msg.msg_control = 0 ;
recv_msg.msg_controllen = 0 ;
#else
recv_msg.msg_accrights = 0;
recv_msg.msg_accrightslen = 0;
#endif /* ACE_HAS_4_4BSD_SENDMSG_RECVMSG */
ssize_t status = ACE_OS::recvmsg (this->get_handle (),
&recv_msg,
flags);
addr.set_size (recv_msg.msg_namelen);
addr.set_type (((sockaddr_in *) addr.get_addr())->sin_family);
return status;
}
#else /* ACE_HAS_MSG */
// Send an iovec of size N to ADDR as a datagram (connectionless
// version).
ssize_t
ACE_SOCK_Dgram::send (const iovec iov[],
int n,
const ACE_Addr &addr,
int flags) const
{
ACE_TRACE ("ACE_SOCK_Dgram::send");
size_t length = 0;
int i;
// Determine the total length of all the buffers in <iov>.
for (i = 0; i < n; i++)
#if ! (defined(__BORLANDC__) || defined(ACE_LINUX) || defined(ACE_HAS_RTEMS))
// The iov_len is unsigned on Linux, RTEMS and with Borland. If we go
// ahead and try the if, it will emit a warning.
if (iov[i].iov_len < 0)
return -1;
else
#endif
length += iov[i].iov_len;
char *buf = 0;
#if defined (ACE_HAS_ALLOCA)
  buf = (char *) alloca (length);
#else
ACE_NEW_RETURN (buf,
char[length],
-1);
#endif /* !defined (ACE_HAS_ALLOCA) */
char *ptr = buf;
for (i = 0; i < n; i++)
{
ACE_OS::memcpy (ptr, iov[i].iov_base, iov[i].iov_len);
ptr += iov[i].iov_len;
}
ssize_t result = ACE_SOCK_Dgram::send (buf, length, addr, flags);
#if !defined (ACE_HAS_ALLOCA)
delete [] buf;
#endif /* !defined (ACE_HAS_ALLOCA) */
return result;
}
// Recv an iovec of size N to ADDR as a datagram (connectionless
// version).
ssize_t
ACE_SOCK_Dgram::recv (iovec iov[],
int n,
ACE_Addr &addr,
int flags) const
{
ACE_TRACE ("ACE_SOCK_Dgram::recv");
ssize_t length = 0;
int i;
for (i = 0; i < n; i++)
#if ! (defined(__BORLANDC__) || defined(ACE_LINUX) || defined(ACE_HAS_RTEMS))
// The iov_len is unsigned on Linux, RTEMS and with Borland. If we go
// ahead and try the if, it will emit a warning.
if (iov[i].iov_len < 0)
return -1;
else
#endif
length += iov[i].iov_len;
char *buf = 0;
#if defined (ACE_HAS_ALLOCA)
  buf = (char *) alloca (length);
#else
ACE_NEW_RETURN (buf,
char[length],
-1);
#endif /* !defined (ACE_HAS_ALLOCA) */
length = ACE_SOCK_Dgram::recv (buf, length, addr, flags);
if (length != -1)
{
char *ptr = buf;
int copyn = length;
for (i = 0;
i < n && copyn > 0;
i++)
{
ACE_OS::memcpy (iov[i].iov_base, ptr,
// iov_len is int on some platforms, size_t on others
copyn > (int) iov[i].iov_len
? (size_t) iov[i].iov_len
: (size_t) copyn);
ptr += iov[i].iov_len;
copyn -= iov[i].iov_len;
}
}
#if !defined (ACE_HAS_ALLOCA)
delete [] buf;
#endif /* !defined (ACE_HAS_ALLOCA) */
return length;
}
#endif /* ACE_HAS_MSG */
ssize_t
ACE_SOCK_Dgram::recv (void *buf,
size_t n,
ACE_Addr &addr,
int flags,
const ACE_Time_Value *timeout) const
{
if( ACE::handle_read_ready (this->get_handle (), timeout) == 1 )
{
// Goes fine, call <recv> to get data
return this->recv (buf, n, addr, flags);
}
else
{
return -1;
}
}
ssize_t
ACE_SOCK_Dgram::send (const void *buf,
size_t n,
const ACE_Addr &addr,
int flags,
const ACE_Time_Value *timeout) const
{
// Check the status of the current socket.
if( ACE::handle_write_ready (this->get_handle (), timeout) == 1 )
{
// Goes fine, call <send> to transmit the data.
return this->send (buf, n, addr, flags);
}
else
{
return -1;
}
}
int
ACE_SOCK_Dgram::set_nic (const ACE_TCHAR *net_if,
int addr_family)
{
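  // Summary of the logic below: when IPv6 support is compiled in, first
  // try to resolve net_if as an IPv6 interface (unless the caller asked
  // for AF_INET only).  On Win32 the IPv4 step is then skipped if an
  // IPv6 interface was already selected; on other platforms it is
  // attempted as well, and a failure there is ignored when the IPv6
  // step succeeded.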
#if defined (IP_MULTICAST_IF) && (IP_MULTICAST_IF != 0)
# if defined (ACE_HAS_IPV6)
bool ipv6_mif_set = false;
if (addr_family == AF_INET6 || addr_family == AF_UNSPEC)
{
ACE_INET_Addr addr;
addr.set (static_cast<u_short> (0), ACE_IPV6_ANY);
ipv6_mreq send_mreq;
if (this->make_multicast_ifaddr6 (&send_mreq,
addr,
net_if) == -1)
return -1;
// Only let this attempt to set unknown interface when INET6 is
// specifically requested. Otherwise we will just try INET.
if (send_mreq.ipv6mr_interface != 0 || addr_family == AF_INET6)
{
if (this->ACE_SOCK::set_option
(IPPROTO_IPV6, IPV6_MULTICAST_IF,
&(send_mreq.ipv6mr_interface),
sizeof send_mreq.ipv6mr_interface) == -1)
return -1;
}
ipv6_mif_set = send_mreq.ipv6mr_interface != 0;
}
# if defined (ACE_WIN32)
  // For Win32, net_if is distinctly different between INET6 and INET,
  // so it is always either an INET6 interface or an INET interface.
if (!ipv6_mif_set && (addr_family == AF_INET || addr_family == AF_UNSPEC))
# else
if (addr_family == AF_INET || addr_family == AF_UNSPEC)
# endif
{
ACE_INET_Addr addr (static_cast<u_short> (0));
ip_mreq send_mreq;
if (this->make_multicast_ifaddr (&send_mreq,
addr,
net_if) == -1)
{
if (!ipv6_mif_set)
return -1;
}
else if (this->ACE_SOCK::set_option (IPPROTO_IP,
IP_MULTICAST_IF,
&(send_mreq.imr_interface),
sizeof send_mreq.imr_interface) == -1)
{
if (!ipv6_mif_set)
return -1;
}
}
# else /* ACE_HAS_IPV6 */
ACE_UNUSED_ARG (addr_family);
ACE_INET_Addr addr (static_cast<u_short> (0));
ip_mreq send_mreq;
if (this->make_multicast_ifaddr (&send_mreq,
addr,
net_if) == -1)
return -1;
if (this->ACE_SOCK::set_option (IPPROTO_IP,
IP_MULTICAST_IF,
&(send_mreq.imr_interface),
sizeof send_mreq.imr_interface) == -1)
return -1;
# endif /* !ACE_HAS_IPV6 */
#else /* IP_MULTICAST_IF */
// Send interface option not supported - ignore it.
// (We may have been invoked by ::subscribe, so we have to allow
// a non-null interface parameter in this function.)
ACE_UNUSED_ARG (net_if);
ACE_UNUSED_ARG (addr_family);
ACE_DEBUG ((LM_DEBUG,
ACE_TEXT ("Send interface specification not ")
ACE_TEXT ("supported - IGNORED.\n")));
#endif /* !IP_MULTICAST_IF */
return 0;
}
int
ACE_SOCK_Dgram::make_multicast_ifaddr (ip_mreq *ret_mreq,
const ACE_INET_Addr &mcast_addr,
const ACE_TCHAR *net_if)
{
ACE_TRACE ("ACE_SOCK_Dgram_Mcast::make_multicast_ifaddr");
ip_mreq lmreq; // Scratch copy.
if (net_if != 0)
{
#if defined (ACE_WIN32) || defined(__INTERIX)
// This port number is not necessary, just convenient
ACE_INET_Addr interface_addr;
if (interface_addr.set (mcast_addr.get_port_number (), net_if) == -1)
return -1;
lmreq.imr_interface.s_addr =
ACE_HTONL (interface_addr.get_ip_address ());
#else
ifreq if_address;
ACE_OS::strcpy (if_address.ifr_name, ACE_TEXT_ALWAYS_CHAR (net_if));
if (ACE_OS::ioctl (this->get_handle (),
SIOCGIFADDR,
&if_address) == -1)
return -1;
sockaddr_in *socket_address =
reinterpret_cast<sockaddr_in*> (&if_address.ifr_addr);
lmreq.imr_interface.s_addr = socket_address->sin_addr.s_addr;
#endif /* ACE_WIN32 || __INTERIX */
}
else
lmreq.imr_interface.s_addr = INADDR_ANY;
lmreq.IMR_MULTIADDR.s_addr = ACE_HTONL (mcast_addr.get_ip_address ());
// Set return info, if requested.
if (ret_mreq)
*ret_mreq = lmreq;
return 0;
}
#if defined (ACE_HAS_IPV6)
// XXX: This will not work on any operating system that neither supports
// if_nametoindex nor is Win32 >= Windows XP/Server 2003.
int
ACE_SOCK_Dgram::make_multicast_ifaddr6 (ipv6_mreq *ret_mreq,
const ACE_INET_Addr &mcast_addr,
const ACE_TCHAR *net_if)
{
ACE_TRACE ("ACE_SOCK_Dgram_Mcast::make_multicast_ifaddr6");
ipv6_mreq lmreq; // Scratch copy.
ACE_OS::memset (&lmreq,
0,
sizeof (lmreq));
#if defined(ACE_LINUX)
if (net_if != 0)
{
lmreq.ipv6mr_interface = ACE_OS::if_nametoindex (ACE_TEXT_ALWAYS_CHAR(net_if));
}
else
#elif defined (ACE_WIN32)
if (net_if != 0)
{
int if_ix = 0;
bool num_if =
ACE_OS::ace_isdigit (net_if[0]) &&
(if_ix = ACE_OS::atoi (net_if)) > 0;
IP_ADAPTER_ADDRESSES tmp_addrs;
// Initial call to determine actual memory size needed
DWORD dwRetVal;
ULONG bufLen = 0;
if ((dwRetVal = ::GetAdaptersAddresses (AF_INET6,
0,
0,
&tmp_addrs,
&bufLen)) != ERROR_BUFFER_OVERFLOW)
return -1; // With output bufferlength 0 this can't be right.
// Get required output buffer and retrieve info for real.
PIP_ADAPTER_ADDRESSES pAddrs;
char *buf;
ACE_NEW_RETURN (buf,
char[bufLen],
-1);
pAddrs = reinterpret_cast<PIP_ADAPTER_ADDRESSES> (buf);
if ((dwRetVal = ::GetAdaptersAddresses (AF_INET6,
0,
0,
pAddrs,
&bufLen)) != NO_ERROR)
{
delete[] buf; // clean up
return -1;
}
lmreq.ipv6mr_interface = 0; // initialize
while (pAddrs)
{
if ((num_if && pAddrs->Ipv6IfIndex == static_cast<unsigned int>(if_ix))
|| (!num_if &&
(ACE_OS::strcmp (ACE_TEXT_ALWAYS_CHAR (net_if),
pAddrs->AdapterName) == 0
|| ACE_OS::strcmp (ACE_TEXT_ALWAYS_CHAR (net_if),
ACE_Wide_To_Ascii (pAddrs->FriendlyName).char_rep()) == 0)))
{
lmreq.ipv6mr_interface = pAddrs->Ipv6IfIndex;
break;
}
pAddrs = pAddrs->Next;
}
delete[] buf; // clean up
}
else
#else /* ACE_WIN32 */
ACE_UNUSED_ARG(net_if);
#endif /* ACE_WIN32 */
lmreq.ipv6mr_interface = 0;
// now set the multicast address
ACE_OS::memcpy (&lmreq.ipv6mr_multiaddr,
&((sockaddr_in6 *) mcast_addr.get_addr ())->sin6_addr,
sizeof (in6_addr));
// Set return info, if requested.
if (ret_mreq)
*ret_mreq = lmreq;
return 0;
}
#endif /* ACE_HAS_IPV6 */
ACE_END_VERSIONED_NAMESPACE_DECL
| gpl-2.0 |
okeer/android_kernel_semc_msm7x30 | arch/powerpc/kernel/asm-offsets.c | 1546 | 28849 | /*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
#endif
#include <linux/kbuild.h>
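/*
 * DEFINE() comes from <linux/kbuild.h>.  Roughly (the exact expansion
 * may differ between kernel versions), it emits an asm statement such as
 *
 *	asm volatile("\n->" #sym " %0 " #val : : "i" (val));
 *
 * so that compiling this file with -S leaves marker lines like
 *	->THREAD <offset> offsetof(struct task_struct, thread)
 * in the assembler output, from which the build scripts generate the
 * asm-offsets.h macros used by assembly code.
 */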
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/cache.h>
#include <asm/compat.h>
#include <asm/mmu.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
#ifdef CONFIG_PPC_POWERNV
#include <asm/opal.h>
#endif
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
#include <linux/kvm_host.h>
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
#include <asm/kvm_book3s.h>
#endif
#ifdef CONFIG_PPC32
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include "head_booke.h"
#endif
#endif
#if defined(CONFIG_PPC_FSL_BOOK3E)
#include "../mm/mmu_decl.h"
#endif
int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
#endif
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#else /* CONFIG_PPC64 */
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
#endif
#ifdef CONFIG_SPE
DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
#endif
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#ifdef CONFIG_PPC64
DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
context.low_slices_psize));
DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
context.high_slices_psize));
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#ifdef CONFIG_PPC_BOOK3E
DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd));
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit));
DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg));
DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_STD_MMU_64
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
#endif /* CONFIG_PPC_MM_SLICES */
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
DEFINE(SLBSHADOW_STACKVSID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
DEFINE(SLBSHADOW_STACKESID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
#endif /* CONFIG_PPC64 */
/* RTAS */
DEFINE(RTASBASE, offsetof(struct rtas_t, base));
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
/* Interrupt register frame */
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#ifdef CONFIG_PPC64
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
/* hcall statistics */
DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats));
DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls));
DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total));
DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total));
#endif /* CONFIG_PPC64 */
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
#ifndef CONFIG_PPC64
DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
#endif /* CONFIG_PPC64 */
/*
* Note: these symbols include _ because they overlap with special
* register names
*/
DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
#ifndef CONFIG_PPC64
DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
/*
* The PowerPC 400-class & Book-E processors have neither the DAR
* nor the DSISR SPRs. Hence, we overload them to hold the similar
* DEAR and ESR SPRs for such processors. For critical interrupts
* we use them to hold SRR0 and SRR1.
*/
DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
#else /* CONFIG_PPC64 */
DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
#ifndef CONFIG_PPC64
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
#endif /* ! CONFIG_PPC64 */
/* About the CPU features table */
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
DEFINE(pbe_next, offsetof(struct pbe, next));
#ifndef CONFIG_PPC64
DEFINE(TASK_SIZE, TASK_SIZE);
DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
#endif /* ! CONFIG_PPC64 */
/* datapage offsets for use by vdso */
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction));
DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size));
#ifdef CONFIG_PPC64
DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64));
DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
#else
DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
#endif
/* timeval/timezone offsets for use by vdso */
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
/* Other bits used by the vdso */
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
DEFINE(PTE_SIZE, sizeof(pte_t));
#ifdef CONFIG_KVM
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
#ifdef CONFIG_ALTIVEC
DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
#endif
#ifdef CONFIG_VSX
DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
#endif
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_64_HV
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_64_HV
DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
offsetof(struct kvmppc_vcpu_book3s, vcpu));
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
#endif
# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else /* 32-bit */
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif
SVCPU_FIELD(SVCPU_CR, cr);
SVCPU_FIELD(SVCPU_XER, xer);
SVCPU_FIELD(SVCPU_CTR, ctr);
SVCPU_FIELD(SVCPU_LR, lr);
SVCPU_FIELD(SVCPU_PC, pc);
SVCPU_FIELD(SVCPU_R0, gpr[0]);
SVCPU_FIELD(SVCPU_R1, gpr[1]);
SVCPU_FIELD(SVCPU_R2, gpr[2]);
SVCPU_FIELD(SVCPU_R3, gpr[3]);
SVCPU_FIELD(SVCPU_R4, gpr[4]);
SVCPU_FIELD(SVCPU_R5, gpr[5]);
SVCPU_FIELD(SVCPU_R6, gpr[6]);
SVCPU_FIELD(SVCPU_R7, gpr[7]);
SVCPU_FIELD(SVCPU_R8, gpr[8]);
SVCPU_FIELD(SVCPU_R9, gpr[9]);
SVCPU_FIELD(SVCPU_R10, gpr[10]);
SVCPU_FIELD(SVCPU_R11, gpr[11]);
SVCPU_FIELD(SVCPU_R12, gpr[12]);
SVCPU_FIELD(SVCPU_R13, gpr[13]);
SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
SVCPU_FIELD(SVCPU_SR, sr);
#endif
#ifdef CONFIG_PPC64
SVCPU_FIELD(SVCPU_SLB, slb);
SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
#endif
HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
#ifdef CONFIG_KVM_BOOK3S_64_HV
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
HSTATE_FIELD(HSTATE_PMC, host_pmc);
HSTATE_FIELD(HSTATE_PURR, host_purr);
HSTATE_FIELD(HSTATE_SPURR, host_spurr);
HSTATE_FIELD(HSTATE_DSCR, host_dscr);
HSTATE_FIELD(HSTATE_DABR, dabr);
HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
DEFINE(IPI_PRIORITY, IPI_PRIORITY);
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#else /* CONFIG_PPC_BOOK3S */
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_KVM */
#ifdef CONFIG_KVM_GUEST
DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
scratch1));
DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared,
scratch2));
DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared,
scratch3));
DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared,
int_pending));
DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared,
critical));
DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr));
#endif
#ifdef CONFIG_44x
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbl));
DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
arch.timing_last_enter.tv32.tbu));
DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
arch.timing_last_enter.tv32.tbl));
#endif
#ifdef CONFIG_PPC_POWERNV
DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3));
DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0));
DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1));
DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt));
#endif
return 0;
}
| gpl-2.0 |
googyanas/Googy-Max3-Kernel | drivers/media/dvb/frontends/ds3000.c | 4874 | 30906 | /*
Montage Technology DS3000/TS2020 - DVBS/S2 Demodulator/Tuner driver
Copyright (C) 2009 Konstantin Dimitrov <kosio.dimitrov@gmail.com>
Copyright (C) 2009 TurboSight.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/firmware.h>
#include "dvb_frontend.h"
#include "ds3000.h"
static int debug;
#define dprintk(args...) \
do { \
if (debug) \
printk(args); \
} while (0)
/* as of March 2009 current DS3000 firmware version is 1.78 */
/* DS3000 FW v1.78 MD5: a32d17910c4f370073f9346e71d34b80 */
#define DS3000_DEFAULT_FIRMWARE "dvb-fe-ds3000.fw"
#define DS3000_SAMPLE_RATE 96000 /* in kHz */
#define DS3000_XTAL_FREQ 27000 /* in kHz */
/* Register values to initialise the demod in DVB-S mode */
static u8 ds3000_dvbs_init_tab[] = {
0x23, 0x05,
0x08, 0x03,
0x0c, 0x00,
0x21, 0x54,
0x25, 0x82,
0x27, 0x31,
0x30, 0x08,
0x31, 0x40,
0x32, 0x32,
0x33, 0x35,
0x35, 0xff,
0x3a, 0x00,
0x37, 0x10,
0x38, 0x10,
0x39, 0x02,
0x42, 0x60,
0x4a, 0x40,
0x4b, 0x04,
0x4d, 0x91,
0x5d, 0xc8,
0x50, 0x77,
0x51, 0x77,
0x52, 0x36,
0x53, 0x36,
0x56, 0x01,
0x63, 0x43,
0x64, 0x30,
0x65, 0x40,
0x68, 0x26,
0x69, 0x4c,
0x70, 0x20,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x40,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x60,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x80,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0xa0,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x1f,
0x76, 0x00,
0x77, 0xd1,
0x78, 0x0c,
0x79, 0x80,
0x7f, 0x04,
0x7c, 0x00,
0x80, 0x86,
0x81, 0xa6,
0x85, 0x04,
0xcd, 0xf4,
0x90, 0x33,
0xa0, 0x44,
0xc0, 0x18,
0xc3, 0x10,
0xc4, 0x08,
0xc5, 0x80,
0xc6, 0x80,
0xc7, 0x0a,
0xc8, 0x1a,
0xc9, 0x80,
0xfe, 0x92,
0xe0, 0xf8,
0xe6, 0x8b,
0xd0, 0x40,
0xf8, 0x20,
0xfa, 0x0f,
0xfd, 0x20,
0xad, 0x20,
0xae, 0x07,
0xb8, 0x00,
};
/* Register values to initialise the demod in DVB-S2 mode */
static u8 ds3000_dvbs2_init_tab[] = {
0x23, 0x0f,
0x08, 0x07,
0x0c, 0x00,
0x21, 0x54,
0x25, 0x82,
0x27, 0x31,
0x30, 0x08,
0x31, 0x32,
0x32, 0x32,
0x33, 0x35,
0x35, 0xff,
0x3a, 0x00,
0x37, 0x10,
0x38, 0x10,
0x39, 0x02,
0x42, 0x60,
0x4a, 0x80,
0x4b, 0x04,
0x4d, 0x81,
0x5d, 0x88,
0x50, 0x36,
0x51, 0x36,
0x52, 0x36,
0x53, 0x36,
0x63, 0x60,
0x64, 0x10,
0x65, 0x10,
0x68, 0x04,
0x69, 0x29,
0x70, 0x20,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x40,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x60,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x80,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0xa0,
0x71, 0x70,
0x72, 0x04,
0x73, 0x00,
0x70, 0x1f,
0xa0, 0x44,
0xc0, 0x08,
0xc1, 0x10,
0xc2, 0x08,
0xc3, 0x10,
0xc4, 0x08,
0xc5, 0xf0,
0xc6, 0xf0,
0xc7, 0x0a,
0xc8, 0x1a,
0xc9, 0x80,
0xca, 0x23,
0xcb, 0x24,
0xce, 0x74,
0x90, 0x03,
0x76, 0x80,
0x77, 0x42,
0x78, 0x0a,
0x79, 0x80,
0xad, 0x40,
0xae, 0x07,
0x7f, 0xd4,
0x7c, 0x00,
0x80, 0xa8,
0x81, 0xda,
0x7c, 0x01,
0x80, 0xda,
0x81, 0xec,
0x7c, 0x02,
0x80, 0xca,
0x81, 0xeb,
0x7c, 0x03,
0x80, 0xba,
0x81, 0xdb,
0x85, 0x08,
0x86, 0x00,
0x87, 0x02,
0x89, 0x80,
0x8b, 0x44,
0x8c, 0xaa,
0x8a, 0x10,
0xba, 0x00,
0xf5, 0x04,
0xfe, 0x44,
0xd2, 0x32,
0xb8, 0x00,
};
struct ds3000_state {
struct i2c_adapter *i2c;
const struct ds3000_config *config;
struct dvb_frontend frontend;
u8 skip_fw_load;
/* previous uncorrected block counter for DVB-S2 */
u16 prevUCBS2;
};
static int ds3000_writereg(struct ds3000_state *state, int reg, int data)
{
u8 buf[] = { reg, data };
struct i2c_msg msg = { .addr = state->config->demod_address,
.flags = 0, .buf = buf, .len = 2 };
int err;
dprintk("%s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data);
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
" value == 0x%02x)\n", __func__, err, reg, data);
return -EREMOTEIO;
}
return 0;
}
static int ds3000_tuner_writereg(struct ds3000_state *state, int reg, int data)
{
u8 buf[] = { reg, data };
struct i2c_msg msg = { .addr = 0x60,
.flags = 0, .buf = buf, .len = 2 };
int err;
dprintk("%s: write reg 0x%02x, value 0x%02x\n", __func__, reg, data);
ds3000_writereg(state, 0x03, 0x11);
err = i2c_transfer(state->i2c, &msg, 1);
if (err != 1) {
printk("%s: writereg error(err == %i, reg == 0x%02x,"
" value == 0x%02x)\n", __func__, err, reg, data);
return -EREMOTEIO;
}
return 0;
}
/* I2C write for 8k firmware load */
static int ds3000_writeFW(struct ds3000_state *state, int reg,
const u8 *data, u16 len)
{
int i, ret = -EREMOTEIO;
struct i2c_msg msg;
u8 *buf;
buf = kmalloc(33, GFP_KERNEL);
if (buf == NULL) {
printk(KERN_ERR "Unable to kmalloc\n");
ret = -ENOMEM;
goto error;
}
*(buf) = reg;
msg.addr = state->config->demod_address;
msg.flags = 0;
msg.buf = buf;
msg.len = 33;
for (i = 0; i < len; i += 32) {
memcpy(buf + 1, data + i, 32);
dprintk("%s: write reg 0x%02x, len = %d\n", __func__, reg, len);
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1) {
printk(KERN_ERR "%s: write error(err == %i, "
"reg == 0x%02x\n", __func__, ret, reg);
ret = -EREMOTEIO;
}
}
error:
kfree(buf);
return ret;
}
static int ds3000_readreg(struct ds3000_state *state, u8 reg)
{
int ret;
u8 b0[] = { reg };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{
.addr = state->config->demod_address,
.flags = 0,
.buf = b0,
.len = 1
}, {
.addr = state->config->demod_address,
.flags = I2C_M_RD,
.buf = b1,
.len = 1
}
};
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2) {
printk(KERN_ERR "%s: reg=0x%x(error=%d)\n", __func__, reg, ret);
return ret;
}
dprintk("%s: read reg 0x%02x, value 0x%02x\n", __func__, reg, b1[0]);
return b1[0];
}
static int ds3000_tuner_readreg(struct ds3000_state *state, u8 reg)
{
int ret;
u8 b0[] = { reg };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{
.addr = 0x60,
.flags = 0,
.buf = b0,
.len = 1
}, {
.addr = 0x60,
.flags = I2C_M_RD,
.buf = b1,
.len = 1
}
};
ds3000_writereg(state, 0x03, 0x12);
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2) {
printk(KERN_ERR "%s: reg=0x%x(error=%d)\n", __func__, reg, ret);
return ret;
}
dprintk("%s: read reg 0x%02x, value 0x%02x\n", __func__, reg, b1[0]);
return b1[0];
}
static int ds3000_load_firmware(struct dvb_frontend *fe,
const struct firmware *fw);
static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
{
struct ds3000_state *state = fe->demodulator_priv;
const struct firmware *fw;
int ret = 0;
dprintk("%s()\n", __func__);
if (ds3000_readreg(state, 0xb2) <= 0)
return ret;
if (state->skip_fw_load)
return 0;
/* Load firmware */
/* request the firmware, this will block until someone uploads it */
printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", __func__,
DS3000_DEFAULT_FIRMWARE);
ret = request_firmware(&fw, DS3000_DEFAULT_FIRMWARE,
state->i2c->dev.parent);
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__);
if (ret) {
printk(KERN_ERR "%s: No firmware uploaded (timeout or file not "
"found?)\n", __func__);
return ret;
}
/* Make sure we don't recurse back through here during loading */
state->skip_fw_load = 1;
ret = ds3000_load_firmware(fe, fw);
if (ret)
printk("%s: Writing firmware to device failed\n", __func__);
release_firmware(fw);
dprintk("%s: Firmware upload %s\n", __func__,
ret == 0 ? "complete" : "failed");
/* Ensure firmware is always loaded if required */
state->skip_fw_load = 0;
return ret;
}
static int ds3000_load_firmware(struct dvb_frontend *fe,
const struct firmware *fw)
{
struct ds3000_state *state = fe->demodulator_priv;
dprintk("%s\n", __func__);
dprintk("Firmware is %zu bytes (%02x %02x .. %02x %02x)\n",
fw->size,
fw->data[0],
fw->data[1],
fw->data[fw->size - 2],
fw->data[fw->size - 1]);
/* Begin the firmware load process */
ds3000_writereg(state, 0xb2, 0x01);
/* write the entire firmware */
ds3000_writeFW(state, 0xb0, fw->data, fw->size);
ds3000_writereg(state, 0xb2, 0x00);
return 0;
}
static int ds3000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct ds3000_state *state = fe->demodulator_priv;
u8 data;
dprintk("%s(%d)\n", __func__, voltage);
data = ds3000_readreg(state, 0xa2);
data |= 0x03; /* bit0 V/H, bit1 off/on */
switch (voltage) {
case SEC_VOLTAGE_18:
data &= ~0x03;
break;
case SEC_VOLTAGE_13:
data &= ~0x03;
data |= 0x01;
break;
case SEC_VOLTAGE_OFF:
break;
}
ds3000_writereg(state, 0xa2, data);
return 0;
}
static int ds3000_read_status(struct dvb_frontend *fe, fe_status_t* status)
{
struct ds3000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int lock;
*status = 0;
switch (c->delivery_system) {
case SYS_DVBS:
lock = ds3000_readreg(state, 0xd1);
if ((lock & 0x07) == 0x07)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
break;
case SYS_DVBS2:
lock = ds3000_readreg(state, 0x0d);
if ((lock & 0x8f) == 0x8f)
*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
break;
default:
return 1;
}
dprintk("%s: status = 0x%02x\n", __func__, lock);
return 0;
}
/* read DS3000 BER value */
static int ds3000_read_ber(struct dvb_frontend *fe, u32* ber)
{
struct ds3000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 data;
u32 ber_reading, lpdc_frames;
dprintk("%s()\n", __func__);
switch (c->delivery_system) {
case SYS_DVBS:
/* set the number of bytes checked during
BER estimation */
ds3000_writereg(state, 0xf9, 0x04);
/* read BER estimation status */
data = ds3000_readreg(state, 0xf8);
/* check if BER estimation is ready */
if ((data & 0x10) == 0) {
/* this is the number of error bits,
to calculate the bit error rate
divide by 8388608 */
*ber = (ds3000_readreg(state, 0xf7) << 8) |
ds3000_readreg(state, 0xf6);
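/*
 * Illustrative arithmetic: a raw count of 0x0200 (512 error bits)
 * over the 8388608 bits checked corresponds to a bit error rate of
 * 512 / 8388608 ~= 6.1e-5; the driver reports the raw count and
 * leaves that division to the caller.
 */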
/* start counting error bits */
/* need to be set twice
otherwise it fails sometimes */
data |= 0x10;
ds3000_writereg(state, 0xf8, data);
ds3000_writereg(state, 0xf8, data);
} else
/* used to indicate that BER estimation
is not ready, i.e. BER is unknown */
*ber = 0xffffffff;
break;
case SYS_DVBS2:
/* read the number of LDPC decoded frames */
lpdc_frames = (ds3000_readreg(state, 0xd7) << 16) |
(ds3000_readreg(state, 0xd6) << 8) |
ds3000_readreg(state, 0xd5);
/* read the number of packets with bad CRC */
ber_reading = (ds3000_readreg(state, 0xf8) << 8) |
ds3000_readreg(state, 0xf7);
if (lpdc_frames > 750) {
/* clear LDPC frame counters */
ds3000_writereg(state, 0xd1, 0x01);
/* clear bad packets counter */
ds3000_writereg(state, 0xf9, 0x01);
/* enable bad packets counter */
ds3000_writereg(state, 0xf9, 0x00);
/* enable LDPC frame counters */
ds3000_writereg(state, 0xd1, 0x00);
*ber = ber_reading;
} else
/* used to indicate that BER estimation is not ready,
i.e. BER is unknown */
*ber = 0xffffffff;
break;
default:
return 1;
}
return 0;
}
/* read TS2020 signal strength */
static int ds3000_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
struct ds3000_state *state = fe->demodulator_priv;
u16 sig_reading, sig_strength;
u8 rfgain, bbgain;
dprintk("%s()\n", __func__);
rfgain = ds3000_tuner_readreg(state, 0x3d) & 0x1f;
bbgain = ds3000_tuner_readreg(state, 0x21) & 0x1f;
if (rfgain > 15)
rfgain = 15;
if (bbgain > 13)
bbgain = 13;
sig_reading = rfgain * 2 + bbgain * 3;
sig_strength = 40 + (64 - sig_reading) * 50 / 64;
/* cook the value to be suitable for szap-s2 human readable output */
*signal_strength = sig_strength * 1000;
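/*
 * Illustrative figures: rfgain = 10 and bbgain = 8 give
 * sig_reading = 10 * 2 + 8 * 3 = 44,
 * sig_strength = 40 + (64 - 44) * 50 / 64 = 55,
 * and a reported value of 55000.
 */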
dprintk("%s: raw / cooked = 0x%04x / 0x%04x\n", __func__,
sig_reading, *signal_strength);
return 0;
}
/* calculate DS3000 snr value in dB */
static int ds3000_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct ds3000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 snr_reading, snr_value;
u32 dvbs2_signal_reading, dvbs2_noise_reading, tmp;
static const u16 dvbs_snr_tab[] = { /* 20 x Table (rounded up) */
0x0000, 0x1b13, 0x2aea, 0x3627, 0x3ede, 0x45fe, 0x4c03,
0x513a, 0x55d4, 0x59f2, 0x5dab, 0x6111, 0x6431, 0x6717,
0x69c9, 0x6c4e, 0x6eac, 0x70e8, 0x7304, 0x7505
};
static const u16 dvbs2_snr_tab[] = { /* 80 x Table (rounded up) */
0x0000, 0x0bc2, 0x12a3, 0x1785, 0x1b4e, 0x1e65, 0x2103,
0x2347, 0x2546, 0x2710, 0x28ae, 0x2a28, 0x2b83, 0x2cc5,
0x2df1, 0x2f09, 0x3010, 0x3109, 0x31f4, 0x32d2, 0x33a6,
0x3470, 0x3531, 0x35ea, 0x369b, 0x3746, 0x37ea, 0x3888,
0x3920, 0x39b3, 0x3a42, 0x3acc, 0x3b51, 0x3bd3, 0x3c51,
0x3ccb, 0x3d42, 0x3db6, 0x3e27, 0x3e95, 0x3f00, 0x3f68,
0x3fcf, 0x4033, 0x4094, 0x40f4, 0x4151, 0x41ac, 0x4206,
0x425e, 0x42b4, 0x4308, 0x435b, 0x43ac, 0x43fc, 0x444a,
0x4497, 0x44e2, 0x452d, 0x4576, 0x45bd, 0x4604, 0x4649,
0x468e, 0x46d1, 0x4713, 0x4755, 0x4795, 0x47d4, 0x4813,
0x4851, 0x488d, 0x48c9, 0x4904, 0x493f, 0x4978, 0x49b1,
0x49e9, 0x4a20, 0x4a57
};
dprintk("%s()\n", __func__);
switch (c->delivery_system) {
case SYS_DVBS:
snr_reading = ds3000_readreg(state, 0xff);
snr_reading /= 8;
if (snr_reading == 0)
*snr = 0x0000;
else {
if (snr_reading > 20)
snr_reading = 20;
snr_value = dvbs_snr_tab[snr_reading - 1] * 10 / 23026;
/* cook the value to be suitable for szap-s2
human readable output */
*snr = snr_value * 8 * 655;
}
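/*
 * Illustrative figures: a raw reading of 0x50 gives snr_reading = 10,
 * dvbs_snr_tab[9] = 0x59f2 = 23026, so snr_value = 23026 * 10 / 23026 = 10
 * and the reported *snr is 10 * 8 * 655 = 52400.
 */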
dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__,
snr_reading, *snr);
break;
case SYS_DVBS2:
dvbs2_noise_reading = (ds3000_readreg(state, 0x8c) & 0x3f) +
(ds3000_readreg(state, 0x8d) << 4);
dvbs2_signal_reading = ds3000_readreg(state, 0x8e);
tmp = dvbs2_signal_reading * dvbs2_signal_reading >> 1;
if (tmp == 0) {
*snr = 0x0000;
return 0;
}
if (dvbs2_noise_reading == 0) {
snr_value = 0x0013;
/* cook the value to be suitable for szap-s2
human readable output */
*snr = 0xffff;
return 0;
}
if (tmp > dvbs2_noise_reading) {
snr_reading = tmp / dvbs2_noise_reading;
if (snr_reading > 80)
snr_reading = 80;
snr_value = dvbs2_snr_tab[snr_reading - 1] / 1000;
/* cook the value to be suitable for szap-s2
human readable output */
*snr = snr_value * 5 * 655;
} else {
snr_reading = dvbs2_noise_reading / tmp;
if (snr_reading > 80)
snr_reading = 80;
*snr = -(dvbs2_snr_tab[snr_reading] / 1000);
}
dprintk("%s: raw / cooked = 0x%02x / 0x%04x\n", __func__,
snr_reading, *snr);
break;
default:
return 1;
}
return 0;
}
/* read DS3000 uncorrected blocks */
static int ds3000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct ds3000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 data;
u16 _ucblocks;
dprintk("%s()\n", __func__);
switch (c->delivery_system) {
case SYS_DVBS:
*ucblocks = (ds3000_readreg(state, 0xf5) << 8) |
ds3000_readreg(state, 0xf4);
data = ds3000_readreg(state, 0xf8);
/* clear packet counters */
data &= ~0x20;
ds3000_writereg(state, 0xf8, data);
/* enable packet counters */
data |= 0x20;
ds3000_writereg(state, 0xf8, data);
break;
case SYS_DVBS2:
_ucblocks = (ds3000_readreg(state, 0xe2) << 8) |
ds3000_readreg(state, 0xe1);
if (_ucblocks > state->prevUCBS2)
*ucblocks = _ucblocks - state->prevUCBS2;
else
*ucblocks = state->prevUCBS2 - _ucblocks;
state->prevUCBS2 = _ucblocks;
break;
default:
return 1;
}
return 0;
}
static int ds3000_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
struct ds3000_state *state = fe->demodulator_priv;
u8 data;
dprintk("%s(%d)\n", __func__, tone);
if ((tone != SEC_TONE_ON) && (tone != SEC_TONE_OFF)) {
printk(KERN_ERR "%s: Invalid, tone=%d\n", __func__, tone);
return -EINVAL;
}
data = ds3000_readreg(state, 0xa2);
data &= ~0xc0;
ds3000_writereg(state, 0xa2, data);
switch (tone) {
case SEC_TONE_ON:
dprintk("%s: setting tone on\n", __func__);
data = ds3000_readreg(state, 0xa1);
data &= ~0x43;
data |= 0x04;
ds3000_writereg(state, 0xa1, data);
break;
case SEC_TONE_OFF:
dprintk("%s: setting tone off\n", __func__);
data = ds3000_readreg(state, 0xa2);
data |= 0x80;
ds3000_writereg(state, 0xa2, data);
break;
}
return 0;
}
static int ds3000_send_diseqc_msg(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *d)
{
struct ds3000_state *state = fe->demodulator_priv;
int i;
u8 data;
/* Dump DiSEqC message */
dprintk("%s(", __func__);
for (i = 0 ; i < d->msg_len;) {
dprintk("0x%02x", d->msg[i]);
if (++i < d->msg_len)
dprintk(", ");
}
/* enable DiSEqC message send pin */
data = ds3000_readreg(state, 0xa2);
data &= ~0xc0;
ds3000_writereg(state, 0xa2, data);
/* DiSEqC message */
for (i = 0; i < d->msg_len; i++)
ds3000_writereg(state, 0xa3 + i, d->msg[i]);
data = ds3000_readreg(state, 0xa1);
/* clear DiSEqC message length and status,
enable DiSEqC message send */
data &= ~0xf8;
/* set DiSEqC mode, modulation active during 33 pulses,
set DiSEqC message length */
data |= ((d->msg_len - 1) << 3) | 0x07;
ds3000_writereg(state, 0xa1, data);
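/*
 * Illustrative example using a standard DiSEqC 1.0 committed-switch
 * command (not specific to this driver): the 4-byte message
 * 0xe0 0x10 0x38 0xf0 is written to registers 0xa3..0xa6 above and the
 * length/start field becomes ((4 - 1) << 3) | 0x07 = 0x1f.
 */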
/* wait up to 150ms for DiSEqC transmission to complete */
for (i = 0; i < 15; i++) {
data = ds3000_readreg(state, 0xa1);
if ((data & 0x40) == 0)
break;
msleep(10);
}
/* DiSEqC timeout after 150ms */
if (i == 15) {
data = ds3000_readreg(state, 0xa1);
data &= ~0x80;
data |= 0x40;
ds3000_writereg(state, 0xa1, data);
data = ds3000_readreg(state, 0xa2);
data &= ~0xc0;
data |= 0x80;
ds3000_writereg(state, 0xa2, data);
return 1;
}
data = ds3000_readreg(state, 0xa2);
data &= ~0xc0;
data |= 0x80;
ds3000_writereg(state, 0xa2, data);
return 0;
}
/* Send DiSEqC burst */
static int ds3000_diseqc_send_burst(struct dvb_frontend *fe,
fe_sec_mini_cmd_t burst)
{
struct ds3000_state *state = fe->demodulator_priv;
int i;
u8 data;
dprintk("%s()\n", __func__);
data = ds3000_readreg(state, 0xa2);
data &= ~0xc0;
ds3000_writereg(state, 0xa2, data);
/* DiSEqC burst */
if (burst == SEC_MINI_A)
/* Unmodulated tone burst */
ds3000_writereg(state, 0xa1, 0x02);
else if (burst == SEC_MINI_B)
/* Modulated tone burst */
ds3000_writereg(state, 0xa1, 0x01);
else
return -EINVAL;
msleep(13);
for (i = 0; i < 5; i++) {
data = ds3000_readreg(state, 0xa1);
if ((data & 0x40) == 0)
break;
msleep(1);
}
if (i == 5) {
data = ds3000_readreg(state, 0xa1);
data &= ~0x80;
data |= 0x40;
ds3000_writereg(state, 0xa1, data);
data = ds3000_readreg(state, 0xa2);
data &= ~0xc0;
data |= 0x80;
ds3000_writereg(state, 0xa2, data);
return 1;
}
data = ds3000_readreg(state, 0xa2);
data &= ~0xc0;
data |= 0x80;
ds3000_writereg(state, 0xa2, data);
return 0;
}
static void ds3000_release(struct dvb_frontend *fe)
{
struct ds3000_state *state = fe->demodulator_priv;
dprintk("%s\n", __func__);
kfree(state);
}
static struct dvb_frontend_ops ds3000_ops;
struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
struct i2c_adapter *i2c)
{
struct ds3000_state *state = NULL;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct ds3000_state), GFP_KERNEL);
if (state == NULL) {
printk(KERN_ERR "Unable to kmalloc\n");
goto error2;
}
state->config = config;
state->i2c = i2c;
state->prevUCBS2 = 0;
/* check if the demod is present */
ret = ds3000_readreg(state, 0x00) & 0xfe;
if (ret != 0xe0) {
printk(KERN_ERR "Invalid probe, probably not a DS3000\n");
goto error3;
}
printk(KERN_INFO "DS3000 chip version: %d.%d attached.\n",
ds3000_readreg(state, 0x02),
ds3000_readreg(state, 0x01));
memcpy(&state->frontend.ops, &ds3000_ops,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
error3:
kfree(state);
error2:
return NULL;
}
EXPORT_SYMBOL(ds3000_attach);
static int ds3000_set_carrier_offset(struct dvb_frontend *fe,
s32 carrier_offset_khz)
{
struct ds3000_state *state = fe->demodulator_priv;
s32 tmp;
tmp = carrier_offset_khz;
tmp *= 65536;
tmp = (2 * tmp + DS3000_SAMPLE_RATE) / (2 * DS3000_SAMPLE_RATE);
if (tmp < 0)
tmp += 65536;
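/*
 * Illustrative figures: carrier_offset_khz = 1000 gives
 * tmp = (2 * 65536000 + 96000) / 192000 = 683 = 0x02ab, so register
 * 0x5f gets 0x02 and register 0x5e gets 0xab.
 */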
ds3000_writereg(state, 0x5f, tmp >> 8);
ds3000_writereg(state, 0x5e, tmp & 0xff);
return 0;
}
static int ds3000_set_frontend(struct dvb_frontend *fe)
{
struct ds3000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int i;
fe_status_t status;
u8 mlpf, mlpf_new, mlpf_max, mlpf_min, nlpf, div4;
s32 offset_khz;
u16 value, ndiv;
u32 f3db;
dprintk("%s() ", __func__);
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
/* Tune */
/* unknown */
ds3000_tuner_writereg(state, 0x07, 0x02);
ds3000_tuner_writereg(state, 0x10, 0x00);
ds3000_tuner_writereg(state, 0x60, 0x79);
ds3000_tuner_writereg(state, 0x08, 0x01);
ds3000_tuner_writereg(state, 0x00, 0x01);
div4 = 0;
/* calculate and set freq divider */
if (c->frequency < 1146000) {
ds3000_tuner_writereg(state, 0x10, 0x11);
div4 = 1;
ndiv = ((c->frequency * (6 + 8) * 4) +
(DS3000_XTAL_FREQ / 2)) /
DS3000_XTAL_FREQ - 1024;
} else {
ds3000_tuner_writereg(state, 0x10, 0x01);
ndiv = ((c->frequency * (6 + 8) * 2) +
(DS3000_XTAL_FREQ / 2)) /
DS3000_XTAL_FREQ - 1024;
}
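/*
 * Illustrative figures: c->frequency = 1550000 kHz takes the high-band
 * branch above, giving ndiv = ((1550000 * 14 * 2) + 13500) / 27000 - 1024
 * = 1607 - 1024 = 583 = 0x247, so register 0x01 gets 0x02 and
 * register 0x02 gets 0x47 below.
 */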
ds3000_tuner_writereg(state, 0x01, (ndiv & 0x0f00) >> 8);
ds3000_tuner_writereg(state, 0x02, ndiv & 0x00ff);
/* set pll */
ds3000_tuner_writereg(state, 0x03, 0x06);
ds3000_tuner_writereg(state, 0x51, 0x0f);
ds3000_tuner_writereg(state, 0x51, 0x1f);
ds3000_tuner_writereg(state, 0x50, 0x10);
ds3000_tuner_writereg(state, 0x50, 0x00);
msleep(5);
/* unknown */
ds3000_tuner_writereg(state, 0x51, 0x17);
ds3000_tuner_writereg(state, 0x51, 0x1f);
ds3000_tuner_writereg(state, 0x50, 0x08);
ds3000_tuner_writereg(state, 0x50, 0x00);
msleep(5);
value = ds3000_tuner_readreg(state, 0x3d);
value &= 0x0f;
if ((value > 4) && (value < 15)) {
value -= 3;
if (value < 4)
value = 4;
value = ((value << 3) | 0x01) & 0x79;
}
ds3000_tuner_writereg(state, 0x60, value);
ds3000_tuner_writereg(state, 0x51, 0x17);
ds3000_tuner_writereg(state, 0x51, 0x1f);
ds3000_tuner_writereg(state, 0x50, 0x08);
ds3000_tuner_writereg(state, 0x50, 0x00);
/* set low-pass filter period */
ds3000_tuner_writereg(state, 0x04, 0x2e);
ds3000_tuner_writereg(state, 0x51, 0x1b);
ds3000_tuner_writereg(state, 0x51, 0x1f);
ds3000_tuner_writereg(state, 0x50, 0x04);
ds3000_tuner_writereg(state, 0x50, 0x00);
msleep(5);
f3db = ((c->symbol_rate / 1000) << 2) / 5 + 2000;
if ((c->symbol_rate / 1000) < 5000)
f3db += 3000;
if (f3db < 7000)
f3db = 7000;
if (f3db > 40000)
f3db = 40000;
/* set low-pass filter baseband */
value = ds3000_tuner_readreg(state, 0x26);
mlpf = 0x2e * 207 / ((value << 1) + 151);
mlpf_max = mlpf * 135 / 100;
mlpf_min = mlpf * 78 / 100;
if (mlpf_max > 63)
mlpf_max = 63;
/* rounded to the closest integer */
nlpf = ((mlpf * f3db * 1000) + (2766 * DS3000_XTAL_FREQ / 2))
/ (2766 * DS3000_XTAL_FREQ);
if (nlpf > 23)
nlpf = 23;
if (nlpf < 1)
nlpf = 1;
/* rounded to the closest integer */
mlpf_new = ((DS3000_XTAL_FREQ * nlpf * 2766) +
(1000 * f3db / 2)) / (1000 * f3db);
if (mlpf_new < mlpf_min) {
nlpf++;
mlpf_new = ((DS3000_XTAL_FREQ * nlpf * 2766) +
(1000 * f3db / 2)) / (1000 * f3db);
}
if (mlpf_new > mlpf_max)
mlpf_new = mlpf_max;
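/*
 * Illustrative figures (the 0x26 readback of 0x20 is an assumption):
 * for a 27500 ksps signal f3db = 24000, mlpf = 46 * 207 / 215 = 44,
 * mlpf_min/mlpf_max = 34/59,
 * nlpf = (44 * 24000000 + 37341000) / 74682000 = 14 and
 * mlpf_new = (27000 * 14 * 2766 + 12000000) / 24000000 = 44, so
 * registers 0x04 and 0x06 get 0x2c and 0x0e.
 */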
ds3000_tuner_writereg(state, 0x04, mlpf_new);
ds3000_tuner_writereg(state, 0x06, nlpf);
ds3000_tuner_writereg(state, 0x51, 0x1b);
ds3000_tuner_writereg(state, 0x51, 0x1f);
ds3000_tuner_writereg(state, 0x50, 0x04);
ds3000_tuner_writereg(state, 0x50, 0x00);
msleep(5);
/* unknown */
ds3000_tuner_writereg(state, 0x51, 0x1e);
ds3000_tuner_writereg(state, 0x51, 0x1f);
ds3000_tuner_writereg(state, 0x50, 0x01);
ds3000_tuner_writereg(state, 0x50, 0x00);
msleep(60);
offset_khz = (ndiv - ndiv % 2 + 1024) * DS3000_XTAL_FREQ
/ (6 + 8) / (div4 + 1) / 2 - c->frequency;
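/*
 * Continuing the illustrative figures above (ndiv = 583, div4 = 0):
 * offset_khz = (583 - 1 + 1024) * 27000 / 14 / 1 / 2 - 1550000
 * = 1548642 - 1550000 = -1358 kHz, which is handed to
 * ds3000_set_carrier_offset() further down.
 */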
/* ds3000 global reset */
ds3000_writereg(state, 0x07, 0x80);
ds3000_writereg(state, 0x07, 0x00);
/* ds3000 build-in uC reset */
ds3000_writereg(state, 0xb2, 0x01);
/* ds3000 software reset */
ds3000_writereg(state, 0x00, 0x01);
switch (c->delivery_system) {
case SYS_DVBS:
/* initialise the demod in DVB-S mode */
for (i = 0; i < sizeof(ds3000_dvbs_init_tab); i += 2)
ds3000_writereg(state,
ds3000_dvbs_init_tab[i],
ds3000_dvbs_init_tab[i + 1]);
value = ds3000_readreg(state, 0xfe);
value &= 0xc0;
value |= 0x1b;
ds3000_writereg(state, 0xfe, value);
break;
case SYS_DVBS2:
/* initialise the demod in DVB-S2 mode */
for (i = 0; i < sizeof(ds3000_dvbs2_init_tab); i += 2)
ds3000_writereg(state,
ds3000_dvbs2_init_tab[i],
ds3000_dvbs2_init_tab[i + 1]);
ds3000_writereg(state, 0xfe, 0x98);
break;
default:
return 1;
}
/* enable 27MHz clock output */
ds3000_writereg(state, 0x29, 0x80);
/* enable ac coupling */
ds3000_writereg(state, 0x25, 0x8a);
/* enhance symbol rate performance */
if ((c->symbol_rate / 1000) <= 5000) {
value = 29777 / (c->symbol_rate / 1000) + 1;
if (value % 2 != 0)
value++;
ds3000_writereg(state, 0xc3, 0x0d);
ds3000_writereg(state, 0xc8, value);
ds3000_writereg(state, 0xc4, 0x10);
ds3000_writereg(state, 0xc7, 0x0e);
} else if ((c->symbol_rate / 1000) <= 10000) {
value = 92166 / (c->symbol_rate / 1000) + 1;
if (value % 2 != 0)
value++;
ds3000_writereg(state, 0xc3, 0x07);
ds3000_writereg(state, 0xc8, value);
ds3000_writereg(state, 0xc4, 0x09);
ds3000_writereg(state, 0xc7, 0x12);
} else if ((c->symbol_rate / 1000) <= 20000) {
value = 64516 / (c->symbol_rate / 1000) + 1;
ds3000_writereg(state, 0xc3, value);
ds3000_writereg(state, 0xc8, 0x0e);
ds3000_writereg(state, 0xc4, 0x07);
ds3000_writereg(state, 0xc7, 0x18);
} else {
value = 129032 / (c->symbol_rate / 1000) + 1;
ds3000_writereg(state, 0xc3, value);
ds3000_writereg(state, 0xc8, 0x0a);
ds3000_writereg(state, 0xc4, 0x05);
ds3000_writereg(state, 0xc7, 0x24);
}
/* normalized symbol rate rounded to the closest integer */
value = (((c->symbol_rate / 1000) << 16) +
(DS3000_SAMPLE_RATE / 2)) / DS3000_SAMPLE_RATE;
ds3000_writereg(state, 0x61, value & 0x00ff);
ds3000_writereg(state, 0x62, (value & 0xff00) >> 8);
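/*
 * Illustrative figures: a 27500 ksps signal gives
 * value = (27500 * 65536 + 48000) / 96000 = 18773 = 0x4955, so
 * register 0x61 gets 0x55 and register 0x62 gets 0x49.
 */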
/* co-channel interference cancellation disabled */
ds3000_writereg(state, 0x56, 0x00);
/* equalizer disabled */
ds3000_writereg(state, 0x76, 0x00);
/*ds3000_writereg(state, 0x08, 0x03);
ds3000_writereg(state, 0xfd, 0x22);
ds3000_writereg(state, 0x08, 0x07);
ds3000_writereg(state, 0xfd, 0x42);
ds3000_writereg(state, 0x08, 0x07);*/
if (state->config->ci_mode) {
switch (c->delivery_system) {
case SYS_DVBS:
default:
ds3000_writereg(state, 0xfd, 0x80);
break;
case SYS_DVBS2:
ds3000_writereg(state, 0xfd, 0x01);
break;
}
}
/* ds3000 out of software reset */
ds3000_writereg(state, 0x00, 0x00);
/* start ds3000 build-in uC */
ds3000_writereg(state, 0xb2, 0x00);
ds3000_set_carrier_offset(fe, offset_khz);
for (i = 0; i < 30 ; i++) {
ds3000_read_status(fe, &status);
if (status & FE_HAS_LOCK)
break;
msleep(10);
}
return 0;
}
static int ds3000_tune(struct dvb_frontend *fe,
bool re_tune,
unsigned int mode_flags,
unsigned int *delay,
fe_status_t *status)
{
if (re_tune) {
int ret = ds3000_set_frontend(fe);
if (ret)
return ret;
}
*delay = HZ / 5;
return ds3000_read_status(fe, status);
}
static enum dvbfe_algo ds3000_get_algo(struct dvb_frontend *fe)
{
dprintk("%s()\n", __func__);
return DVBFE_ALGO_HW;
}
/*
* Initialise or wake up device
*
* Power config will reset and load initial firmware if required
*/
static int ds3000_initfe(struct dvb_frontend *fe)
{
struct ds3000_state *state = fe->demodulator_priv;
int ret;
dprintk("%s()\n", __func__);
/* hard reset */
ds3000_writereg(state, 0x08, 0x01 | ds3000_readreg(state, 0x08));
msleep(1);
/* TS2020 init */
ds3000_tuner_writereg(state, 0x42, 0x73);
ds3000_tuner_writereg(state, 0x05, 0x01);
ds3000_tuner_writereg(state, 0x62, 0xf5);
/* Load the firmware if required */
ret = ds3000_firmware_ondemand(fe);
if (ret != 0) {
printk(KERN_ERR "%s: Unable initialize firmware\n", __func__);
return ret;
}
return 0;
}
/* Put device to sleep */
static int ds3000_sleep(struct dvb_frontend *fe)
{
dprintk("%s()\n", __func__);
return 0;
}
static struct dvb_frontend_ops ds3000_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2},
.info = {
.name = "Montage Technology DS3000/TS2020",
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 1011, /* kHz for QPSK frontends */
.frequency_tolerance = 5000,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_2G_MODULATION |
FE_CAN_QPSK | FE_CAN_RECOVER
},
.release = ds3000_release,
.init = ds3000_initfe,
.sleep = ds3000_sleep,
.read_status = ds3000_read_status,
.read_ber = ds3000_read_ber,
.read_signal_strength = ds3000_read_signal_strength,
.read_snr = ds3000_read_snr,
.read_ucblocks = ds3000_read_ucblocks,
.set_voltage = ds3000_set_voltage,
.set_tone = ds3000_set_tone,
.diseqc_send_master_cmd = ds3000_send_diseqc_msg,
.diseqc_send_burst = ds3000_diseqc_send_burst,
.get_frontend_algo = ds3000_get_algo,
.set_frontend = ds3000_set_frontend,
.tune = ds3000_tune,
};
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
"DS3000/TS2020 hardware");
MODULE_AUTHOR("Konstantin Dimitrov");
MODULE_LICENSE("GPL");
| gpl-2.0 |
HPTesla/Viper | drivers/crypto/hifn_795x.c | 4874 | 78573 | /*
* 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/ktime.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <asm/kmap_types.h>
//#define HIFN_DEBUG
#ifdef HIFN_DEBUG
#define dprintk(f, a...) printk(f, ##a)
#else
#define dprintk(f, a...) do {} while (0)
#endif
static char hifn_pll_ref[sizeof("extNNN")] = "ext";
module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
MODULE_PARM_DESC(hifn_pll_ref,
"PLL reference clock (pci[freq] or ext[freq], default ext)");
static atomic_t hifn_dev_number;
#define ACRYPTO_OP_DECRYPT 0
#define ACRYPTO_OP_ENCRYPT 1
#define ACRYPTO_OP_HMAC 2
#define ACRYPTO_OP_RNG 3
#define ACRYPTO_MODE_ECB 0
#define ACRYPTO_MODE_CBC 1
#define ACRYPTO_MODE_CFB 2
#define ACRYPTO_MODE_OFB 3
#define ACRYPTO_TYPE_AES_128 0
#define ACRYPTO_TYPE_AES_192 1
#define ACRYPTO_TYPE_AES_256 2
#define ACRYPTO_TYPE_3DES 3
#define ACRYPTO_TYPE_DES 4
#define PCI_VENDOR_ID_HIFN 0x13A3
#define PCI_DEVICE_ID_HIFN_7955 0x0020
#define PCI_DEVICE_ID_HIFN_7956 0x001d
/* I/O region sizes */
#define HIFN_BAR0_SIZE 0x1000
#define HIFN_BAR1_SIZE 0x2000
#define HIFN_BAR2_SIZE 0x8000
/* DMA registers */
#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */
#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */
#define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */
#define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */
#define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */
#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */
#define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */
#define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */
#define HIFN_CHIP_ID 0x98 /* Chip ID */
/*
* Processing Unit Registers (offset from BASEREG0)
*/
#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
#define HIFN_0_SPACESIZE 0x20 /* Register space size */
/* Processing Unit Control Register (HIFN_0_PUCTRL) */
#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
/* FIFO Status Register (HIFN_0_FIFOSTAT) */
#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */
/*
* DMA Interface Registers (offset from BASEREG1)
*/
#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
#define HIFN_1_PLL 0x4c /* 795x: PLL config */
#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
#define HIFN_1_REVID 0x98 /* Revision ID */
#define HIFN_1_UNLOCK_SECRET1 0xf4
#define HIFN_1_UNLOCK_SECRET2 0xfc
#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
#define HIFN_1_PUB_OPLEN 0x304 /* Public Operand Length */
#define HIFN_1_PUB_OP 0x308 /* Public Operand */
#define HIFN_1_PUB_STATUS 0x30c /* Public Status */
#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
#define HIFN_1_RNG_DATA 0x318 /* RNG data */
#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */
#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
#define HIFN_DMACSR_D_DONE 0x10000000 /* Destination Ring Done */
#define HIFN_DMACSR_D_LAST 0x08000000 /* Destination Ring Last */
#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destination Ring Waiting */
#define HIFN_DMACSR_D_OVER 0x02000000 /* Destination Ring Overflow */
#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
#define HIFN_DMACNFG_UNLOCK 0x00000800
#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
/* PLL configuration register */
#define HIFN_PLL_REF_CLK_HBI 0x00000000 /* HBI reference clock */
#define HIFN_PLL_REF_CLK_PLL 0x00000001 /* PLL reference clock */
#define HIFN_PLL_BP 0x00000002 /* Reference clock bypass */
#define HIFN_PLL_PK_CLK_HBI 0x00000000 /* PK engine HBI clock */
#define HIFN_PLL_PK_CLK_PLL 0x00000008 /* PK engine PLL clock */
#define HIFN_PLL_PE_CLK_HBI 0x00000000 /* PE engine HBI clock */
#define HIFN_PLL_PE_CLK_PLL 0x00000010 /* PE engine PLL clock */
#define HIFN_PLL_RESERVED_1 0x00000400 /* Reserved bit, must be 1 */
#define HIFN_PLL_ND_SHIFT 11 /* Clock multiplier shift */
#define HIFN_PLL_ND_MULT_2 0x00000000 /* PLL clock multiplier 2 */
#define HIFN_PLL_ND_MULT_4 0x00000800 /* PLL clock multiplier 4 */
#define HIFN_PLL_ND_MULT_6 0x00001000 /* PLL clock multiplier 6 */
#define HIFN_PLL_ND_MULT_8 0x00001800 /* PLL clock multiplier 8 */
#define HIFN_PLL_ND_MULT_10 0x00002000 /* PLL clock multiplier 10 */
#define HIFN_PLL_ND_MULT_12 0x00002800 /* PLL clock multiplier 12 */
#define HIFN_PLL_IS_1_8 0x00000000 /* charge pump (mult. 1-8) */
#define HIFN_PLL_IS_9_12 0x00010000 /* charge pump (mult. 9-12) */
#define HIFN_PLL_FCK_MAX 266 /* Maximum PLL frequency */
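/*
 * Illustrative check (the 66 MHz external reference is an assumption,
 * not taken from this header): HIFN_PLL_ND_MULT_4 then gives
 * 66 * 4 = 264 MHz, just under HIFN_PLL_FCK_MAX (266 MHz), while
 * multiplier 6 (396 MHz) would exceed it.
 */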
/* Public key reset register (HIFN_1_PUB_RESET) */
#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
/* Public base address register (HIFN_1_PUB_BASE) */
#define HIFN_PUBBASE_ADDR 0x00003fff /* base address */
/* Public operand length register (HIFN_1_PUB_OPLEN) */
#define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */
#define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */
#define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */
#define HIFN_PUBOPLEN_EXP_S 7 /* exponent length shift */
#define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */
#define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */
/* Public operation register (HIFN_1_PUB_OP) */
#define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */
#define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */
#define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */
#define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */
#define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */
#define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */
#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */
#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */
/* Public status register (HIFN_1_PUB_STATUS) */
#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
/* Public interrupt enable register (HIFN_1_PUB_IEN) */
#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
/* Random number generator config register (HIFN_1_RNG_CONFIG) */
#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
#define HIFN_NAMESIZE 32
#define HIFN_MAX_RESULT_ORDER 5
#define HIFN_D_CMD_RSIZE 24*1
#define HIFN_D_SRC_RSIZE 80*1
#define HIFN_D_DST_RSIZE 80*1
#define HIFN_D_RES_RSIZE 24*1
#define HIFN_D_DST_DALIGN 4
#define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 1)
#define AES_MIN_KEY_SIZE 16
#define AES_MAX_KEY_SIZE 32
#define HIFN_DES_KEY_LENGTH 8
#define HIFN_3DES_KEY_LENGTH 24
#define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE
#define HIFN_IV_LENGTH 8
#define HIFN_AES_IV_LENGTH 16
#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
#define HIFN_MAC_KEY_LENGTH 64
#define HIFN_MD5_LENGTH 16
#define HIFN_SHA1_LENGTH 20
#define HIFN_MAC_TRUNC_LENGTH 12
#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
#define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4)
#define HIFN_USED_RESULT 12
struct hifn_desc
{
volatile __le32 l;
volatile __le32 p;
};
struct hifn_dma {
struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
/*
* Our current positions for insertion and removal from the descriptor
* rings.
*/
volatile int cmdi, srci, dsti, resi;
volatile int cmdu, srcu, dstu, resu;
int cmdk, srck, dstk, resk;
};
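/*
 * Note (driver convention, ring setup code not shown here): each ring is
 * allocated with one extra descriptor; that last slot is normally
 * programmed as a HIFN_D_JUMP entry pointing back to the ring base so
 * the hardware wraps around without host intervention.
 */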
#define HIFN_FLAG_CMD_BUSY (1<<0)
#define HIFN_FLAG_SRC_BUSY (1<<1)
#define HIFN_FLAG_DST_BUSY (1<<2)
#define HIFN_FLAG_RES_BUSY (1<<3)
#define HIFN_FLAG_OLD_KEY (1<<4)
#define HIFN_DEFAULT_ACTIVE_NUM 5
struct hifn_device
{
char name[HIFN_NAMESIZE];
int irq;
struct pci_dev *pdev;
void __iomem *bar[3];
void *desc_virt;
dma_addr_t desc_dma;
u32 dmareg;
void *sa[HIFN_D_RES_RSIZE];
spinlock_t lock;
u32 flags;
int active, started;
struct delayed_work work;
unsigned long reset;
unsigned long success;
unsigned long prev_success;
u8 snum;
struct tasklet_struct tasklet;
struct crypto_queue queue;
struct list_head alg_list;
unsigned int pk_clk_freq;
#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
unsigned int rng_wait_time;
ktime_t rngtime;
struct hwrng rng;
#endif
};
#define HIFN_D_LENGTH 0x0000ffff
#define HIFN_D_NOINVALID 0x01000000
#define HIFN_D_MASKDONEIRQ 0x02000000
#define HIFN_D_DESTOVER 0x04000000
#define HIFN_D_OVER 0x08000000
#define HIFN_D_LAST 0x20000000
#define HIFN_D_JUMP 0x40000000
#define HIFN_D_VALID 0x80000000
struct hifn_base_command
{
volatile __le16 masks;
volatile __le16 session_num;
volatile __le16 total_source_count;
volatile __le16 total_dest_count;
};
#define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */
#define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */
#define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */
#define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */
#define HIFN_BASE_CMD_DECODE 0x2000
#define HIFN_BASE_CMD_SRCLEN_M 0xc000
#define HIFN_BASE_CMD_SRCLEN_S 14
#define HIFN_BASE_CMD_DSTLEN_M 0x3000
#define HIFN_BASE_CMD_DSTLEN_S 12
#define HIFN_BASE_CMD_LENMASK_HI 0x30000
#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
/*
* Structure to help build up the command data structure.
*/
struct hifn_crypt_command
{
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
#define HIFN_CRYPT_CMD_SRCLEN_S 14
/*
* Structure to help build up the command data structure.
*/
struct hifn_mac_command
{
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_MAC_CMD_ALG_MASK 0x0001
#define HIFN_MAC_CMD_ALG_SHA1 0x0000
#define HIFN_MAC_CMD_ALG_MD5 0x0001
#define HIFN_MAC_CMD_MODE_MASK 0x000c
#define HIFN_MAC_CMD_MODE_HMAC 0x0000
#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
#define HIFN_MAC_CMD_MODE_HASH 0x0008
#define HIFN_MAC_CMD_MODE_FULL 0x0004
#define HIFN_MAC_CMD_TRUNC 0x0010
#define HIFN_MAC_CMD_RESULT 0x0020
#define HIFN_MAC_CMD_APPEND 0x0040
#define HIFN_MAC_CMD_SRCLEN_M 0xc000
#define HIFN_MAC_CMD_SRCLEN_S 14
/*
* MAC POS IPsec initiates authentication after encryption on encodes
* and before decryption on decodes.
*/
#define HIFN_MAC_CMD_POS_IPSEC 0x0200
#define HIFN_MAC_CMD_NEW_KEY 0x0800
struct hifn_comp_command
{
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_COMP_CMD_SRCLEN_M 0xc000
#define HIFN_COMP_CMD_SRCLEN_S 14
#define HIFN_COMP_CMD_ONE 0x0100 /* must be one */
#define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */
#define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */
#define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */
#define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */
#define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */
#define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */
#define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */
struct hifn_base_result
{
volatile __le16 flags;
volatile __le16 session;
volatile __le16 src_cnt; /* 15:0 of source count */
volatile __le16 dst_cnt; /* 15:0 of dest count */
};
#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
#define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */
#define HIFN_BASE_RES_SRCLEN_S 14
#define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */
#define HIFN_BASE_RES_DSTLEN_S 12
struct hifn_comp_result
{
volatile __le16 flags;
volatile __le16 crc;
};
#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
#define HIFN_COMP_RES_LCB_S 8
#define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */
#define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */
#define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */
struct hifn_mac_result
{
volatile __le16 flags;
volatile __le16 reserved;
/* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
};
#define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */
#define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */
struct hifn_crypt_result
{
volatile __le16 flags;
volatile __le16 reserved;
};
#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
#ifndef HIFN_POLL_FREQUENCY
#define HIFN_POLL_FREQUENCY 0x1
#endif
#ifndef HIFN_POLL_SCALAR
#define HIFN_POLL_SCALAR 0x0
#endif
#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
struct hifn_crypto_alg
{
struct list_head entry;
struct crypto_alg alg;
struct hifn_device *dev;
};
#define ASYNC_SCATTERLIST_CACHE 16
#define ASYNC_FLAGS_MISALIGNED (1<<0)
struct hifn_cipher_walk
{
struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
u32 flags;
int num;
};
struct hifn_context
{
u8 key[HIFN_MAX_CRYPT_KEY_LENGTH];
struct hifn_device *dev;
unsigned int keysize;
};
struct hifn_request_context
{
u8 *iv;
unsigned int ivsize;
u8 op, type, mode, unused;
struct hifn_cipher_walk walk;
};
#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
{
u32 ret;
ret = readl(dev->bar[0] + reg);
return ret;
}
static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
{
u32 ret;
ret = readl(dev->bar[1] + reg);
return ret;
}
static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
{
writel((__force u32)cpu_to_le32(val), dev->bar[0] + reg);
}
static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
{
writel((__force u32)cpu_to_le32(val), dev->bar[1] + reg);
}
static void hifn_wait_puc(struct hifn_device *dev)
{
int i;
u32 ret;
for (i=10000; i > 0; --i) {
ret = hifn_read_0(dev, HIFN_0_PUCTRL);
if (!(ret & HIFN_PUCTRL_RESET))
break;
udelay(1);
}
if (!i)
dprintk("%s: Failed to reset PUC unit.\n", dev->name);
}
static void hifn_reset_puc(struct hifn_device *dev)
{
hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
hifn_wait_puc(dev);
}
static void hifn_stop_device(struct hifn_device *dev)
{
hifn_write_1(dev, HIFN_1_DMA_CSR,
HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
hifn_write_0(dev, HIFN_0_PUIER, 0);
hifn_write_1(dev, HIFN_1_DMA_IER, 0);
}
static void hifn_reset_dma(struct hifn_device *dev, int full)
{
hifn_stop_device(dev);
/*
* Setting poll frequency and others to 0.
*/
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
mdelay(1);
/*
* Reset DMA.
*/
if (full) {
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
mdelay(1);
} else {
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
HIFN_DMACNFG_MSTRESET);
hifn_reset_puc(dev);
}
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
hifn_reset_puc(dev);
}
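/*
 * One step of the unlock-signature generator used by hifn_enable_crypto():
 * each iteration takes the parity of the bits selected by 0x80080125 and
 * feeds it into bit 0 while shifting 'a' left, i.e. a Fibonacci-style LFSR
 * clocked 'cnt' times.
 */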
static u32 hifn_next_signature(u32 a, unsigned int cnt)
{
int i;
u32 v;
for (i = 0; i < cnt; i++) {
/* get the parity */
v = a & 0x80080125;
v ^= v >> 16;
v ^= v >> 8;
v ^= v >> 4;
v ^= v >> 2;
v ^= v >> 1;
a = (v & 1) ^ (a << 1);
}
return a;
}
static struct pci2id {
u_short pci_vendor;
u_short pci_prod;
char card_id[13];
} pci2id[] = {
{
PCI_VENDOR_ID_HIFN,
PCI_DEVICE_ID_HIFN_7955,
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00 }
},
{
PCI_VENDOR_ID_HIFN,
PCI_DEVICE_ID_HIFN_7956,
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00 }
}
};
#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
static int hifn_rng_data_present(struct hwrng *rng, int wait)
{
struct hifn_device *dev = (struct hifn_device *)rng->priv;
s64 nsec;
nsec = ktime_to_ns(ktime_sub(ktime_get(), dev->rngtime));
nsec -= dev->rng_wait_time;
if (nsec <= 0)
return 1;
if (!wait)
return 0;
ndelay(nsec);
return 1;
}
static int hifn_rng_data_read(struct hwrng *rng, u32 *data)
{
struct hifn_device *dev = (struct hifn_device *)rng->priv;
*data = hifn_read_1(dev, HIFN_1_RNG_DATA);
dev->rngtime = ktime_get();
return 4;
}
static int hifn_register_rng(struct hifn_device *dev)
{
/*
* We must wait at least 256 Pk_clk cycles between two reads of the rng.
*/
dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
256;
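/*
 * Worked example (the value depends on the PLL setup; see the note in
 * hifn_init_pll()): with pk_clk_freq == 167500000 this evaluates to
 * DIV_ROUND_UP(1000000000, 167500000) * 256 = 6 * 256 = 1536 ns.
 */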
dev->rng.name = dev->name;
dev->rng.data_present = hifn_rng_data_present;
dev->rng.data_read = hifn_rng_data_read;
dev->rng.priv = (unsigned long)dev;
return hwrng_register(&dev->rng);
}
static void hifn_unregister_rng(struct hifn_device *dev)
{
hwrng_unregister(&dev->rng);
}
#else
#define hifn_register_rng(dev) 0
#define hifn_unregister_rng(dev)
#endif
static int hifn_init_pubrng(struct hifn_device *dev)
{
int i;
hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
HIFN_PUBRST_RESET);
for (i=100; i > 0; --i) {
mdelay(1);
if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
break;
}
if (!i)
dprintk("Chip %s: Failed to initialise public key engine.\n",
dev->name);
else {
hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
dev->dmareg |= HIFN_DMAIER_PUBDONE;
hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
dprintk("Chip %s: Public key engine has been successfully "
"initialised.\n", dev->name);
}
/*
* Enable RNG engine.
*/
hifn_write_1(dev, HIFN_1_RNG_CONFIG,
hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
dprintk("Chip %s: RNG engine has been successfully initialised.\n",
dev->name);
#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
/* First value must be discarded */
hifn_read_1(dev, HIFN_1_RNG_DATA);
dev->rngtime = ktime_get();
#endif
return 0;
}
static int hifn_enable_crypto(struct hifn_device *dev)
{
u32 dmacfg, addr;
char *offtbl = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(pci2id); i++) {
if (pci2id[i].pci_vendor == dev->pdev->vendor &&
pci2id[i].pci_prod == dev->pdev->device) {
offtbl = pci2id[i].card_id;
break;
}
}
if (offtbl == NULL) {
dprintk("Chip %s: Unknown card!\n", dev->name);
return -ENODEV;
}
dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
hifn_write_1(dev, HIFN_1_DMA_CNFG,
HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
mdelay(1);
addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
mdelay(1);
hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
mdelay(1);
for (i=0; i<12; ++i) {
addr = hifn_next_signature(addr, offtbl[i] + 0x101);
hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
mdelay(1);
}
hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
return 0;
}
static void hifn_init_dma(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
u32 dptr = dev->desc_dma;
int i;
for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
dma->cmdr[i].p = __cpu_to_le32(dptr +
offsetof(struct hifn_dma, command_bufs[i][0]));
for (i=0; i<HIFN_D_RES_RSIZE; ++i)
dma->resr[i].p = __cpu_to_le32(dptr +
offsetof(struct hifn_dma, result_bufs[i][0]));
/*
* Setup LAST descriptors.
*/
dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
offsetof(struct hifn_dma, cmdr[0]));
dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
offsetof(struct hifn_dma, srcr[0]));
dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
offsetof(struct hifn_dma, dstr[0]));
dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
offsetof(struct hifn_dma, resr[0]));
dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
/*
* Initialize the PLL. We need to know the frequency of the reference clock
* to calculate the optimal multiplier. For PCI we assume 66MHz, since that
* allows us to operate without the risk of overclocking the chip. If it
* actually uses 33MHz, the chip will operate at half the speed; this can be
* overridden by specifying the frequency as a module parameter (pci33).
*
* Unfortunately the PCI clock is not very suitable since the HIFN needs a
* stable clock and the PCI clock frequency may vary, so the default is the
* external clock. There is no way to find out its frequency, so we default to
* 66MHz since, according to Mike Ham of HiFn, almost every board in existence
* has an external crystal populated at 66MHz.
*/
static void hifn_init_pll(struct hifn_device *dev)
{
unsigned int freq, m;
u32 pllcfg;
pllcfg = HIFN_1_PLL | HIFN_PLL_RESERVED_1;
if (strncmp(hifn_pll_ref, "ext", 3) == 0)
pllcfg |= HIFN_PLL_REF_CLK_PLL;
else
pllcfg |= HIFN_PLL_REF_CLK_HBI;
if (hifn_pll_ref[3] != '\0')
freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
else {
freq = 66;
printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, "
"override with hifn_pll_ref=%.3s<frequency>\n",
freq, hifn_pll_ref);
}
m = HIFN_PLL_FCK_MAX / freq;
pllcfg |= (m / 2 - 1) << HIFN_PLL_ND_SHIFT;
if (m <= 8)
pllcfg |= HIFN_PLL_IS_1_8;
else
pllcfg |= HIFN_PLL_IS_9_12;
/* Select clock source and enable clock bypass */
hifn_write_1(dev, HIFN_1_PLL, pllcfg |
HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI | HIFN_PLL_BP);
/* Let the chip lock to the input clock */
mdelay(10);
/* Disable clock bypass */
hifn_write_1(dev, HIFN_1_PLL, pllcfg |
HIFN_PLL_PK_CLK_HBI | HIFN_PLL_PE_CLK_HBI);
/* Switch the engines to the PLL */
hifn_write_1(dev, HIFN_1_PLL, pllcfg |
HIFN_PLL_PK_CLK_PLL | HIFN_PLL_PE_CLK_PLL);
/*
* The Fpk_clk runs at half the total speed. Its frequency is needed to
* calculate the minimum time between two reads of the rng. Since 33MHz
* is actually 33.333... we overestimate the frequency here, resulting
* in slightly larger intervals.
*/
dev->pk_clk_freq = 1000000 * (freq + 1) * m / 2;
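/*
 * Worked example, assuming HIFN_PLL_FCK_MAX is 330 (defined elsewhere):
 * with the default reference of freq = 66 we get m = 330 / 66 = 5, the
 * divider field becomes m / 2 - 1 = 1, HIFN_PLL_IS_1_8 is selected since
 * m <= 8, and pk_clk_freq = 1000000 * (66 + 1) * 5 / 2 = 167500000.
 */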
}
static void hifn_init_registers(struct hifn_device *dev)
{
u32 dptr = dev->desc_dma;
/* Initialization magic... */
hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
/* write all 4 ring address registers */
hifn_write_1(dev, HIFN_1_DMA_CRAR, dptr +
offsetof(struct hifn_dma, cmdr[0]));
hifn_write_1(dev, HIFN_1_DMA_SRAR, dptr +
offsetof(struct hifn_dma, srcr[0]));
hifn_write_1(dev, HIFN_1_DMA_DRAR, dptr +
offsetof(struct hifn_dma, dstr[0]));
hifn_write_1(dev, HIFN_1_DMA_RRAR, dptr +
offsetof(struct hifn_dma, resr[0]));
mdelay(2);
#if 0
hifn_write_1(dev, HIFN_1_DMA_CSR,
HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
HIFN_DMACSR_S_WAIT |
HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
HIFN_DMACSR_C_WAIT |
HIFN_DMACSR_ENGINE |
HIFN_DMACSR_PUBDONE);
#else
hifn_write_1(dev, HIFN_1_DMA_CSR,
HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
HIFN_DMACSR_S_WAIT |
HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
HIFN_DMACSR_C_WAIT |
HIFN_DMACSR_ENGINE |
HIFN_DMACSR_PUBDONE);
#endif
hifn_read_1(dev, HIFN_1_DMA_CSR);
dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
HIFN_DMAIER_ENGINE;
dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
hifn_read_1(dev, HIFN_1_DMA_IER);
#if 0
hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
HIFN_PUCNFG_DRAM);
#else
hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
#endif
hifn_init_pll(dev);
hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
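/*
 * Build the base command. The engine takes 18-bit lengths: bits 15:0 of
 * the source/destination counts go into total_source_count and
 * total_dest_count, while bits 17:16 are packed into session_num next to
 * the session number.
 */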
static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
unsigned dlen, unsigned slen, u16 mask, u8 snum)
{
struct hifn_base_command *base_cmd;
u8 *buf_pos = buf;
base_cmd = (struct hifn_base_command *)buf_pos;
base_cmd->masks = __cpu_to_le16(mask);
base_cmd->total_source_count =
__cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
base_cmd->total_dest_count =
__cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
dlen >>= 16;
slen >>= 16;
base_cmd->session_num = __cpu_to_le16(snum |
((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
return sizeof(struct hifn_base_command);
}
static int hifn_setup_crypto_command(struct hifn_device *dev,
u8 *buf, unsigned dlen, unsigned slen,
u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
struct hifn_crypt_command *cry_cmd;
u8 *buf_pos = buf;
u16 cmd_len;
cry_cmd = (struct hifn_crypt_command *)buf_pos;
cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
dlen >>= 16;
cry_cmd->masks = __cpu_to_le16(mode |
((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
HIFN_CRYPT_CMD_SRCLEN_M));
cry_cmd->header_skip = 0;
cry_cmd->reserved = 0;
buf_pos += sizeof(struct hifn_crypt_command);
dma->cmdu++;
if (dma->cmdu > 1) {
dev->dmareg |= HIFN_DMAIER_C_WAIT;
hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
}
if (keylen) {
memcpy(buf_pos, key, keylen);
buf_pos += keylen;
}
if (ivsize) {
memcpy(buf_pos, iv, ivsize);
buf_pos += ivsize;
}
cmd_len = buf_pos - buf;
return cmd_len;
}
static int hifn_setup_cmd_desc(struct hifn_device *dev,
struct hifn_context *ctx, struct hifn_request_context *rctx,
void *priv, unsigned int nbytes)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int cmd_len, sa_idx;
u8 *buf, *buf_pos;
u16 mask;
sa_idx = dma->cmdi;
buf_pos = buf = dma->command_bufs[dma->cmdi];
mask = 0;
switch (rctx->op) {
case ACRYPTO_OP_DECRYPT:
mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
break;
case ACRYPTO_OP_ENCRYPT:
mask = HIFN_BASE_CMD_CRYPT;
break;
case ACRYPTO_OP_HMAC:
mask = HIFN_BASE_CMD_MAC;
break;
default:
goto err_out;
}
buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
nbytes, mask, dev->snum);
if (rctx->op == ACRYPTO_OP_ENCRYPT || rctx->op == ACRYPTO_OP_DECRYPT) {
u16 md = 0;
if (ctx->keysize)
md |= HIFN_CRYPT_CMD_NEW_KEY;
if (rctx->iv && rctx->mode != ACRYPTO_MODE_ECB)
md |= HIFN_CRYPT_CMD_NEW_IV;
switch (rctx->mode) {
case ACRYPTO_MODE_ECB:
md |= HIFN_CRYPT_CMD_MODE_ECB;
break;
case ACRYPTO_MODE_CBC:
md |= HIFN_CRYPT_CMD_MODE_CBC;
break;
case ACRYPTO_MODE_CFB:
md |= HIFN_CRYPT_CMD_MODE_CFB;
break;
case ACRYPTO_MODE_OFB:
md |= HIFN_CRYPT_CMD_MODE_OFB;
break;
default:
goto err_out;
}
switch (rctx->type) {
case ACRYPTO_TYPE_AES_128:
if (ctx->keysize != 16)
goto err_out;
md |= HIFN_CRYPT_CMD_KSZ_128 |
HIFN_CRYPT_CMD_ALG_AES;
break;
case ACRYPTO_TYPE_AES_192:
if (ctx->keysize != 24)
goto err_out;
md |= HIFN_CRYPT_CMD_KSZ_192 |
HIFN_CRYPT_CMD_ALG_AES;
break;
case ACRYPTO_TYPE_AES_256:
if (ctx->keysize != 32)
goto err_out;
md |= HIFN_CRYPT_CMD_KSZ_256 |
HIFN_CRYPT_CMD_ALG_AES;
break;
case ACRYPTO_TYPE_3DES:
if (ctx->keysize != 24)
goto err_out;
md |= HIFN_CRYPT_CMD_ALG_3DES;
break;
case ACRYPTO_TYPE_DES:
if (ctx->keysize != 8)
goto err_out;
md |= HIFN_CRYPT_CMD_ALG_DES;
break;
default:
goto err_out;
}
buf_pos += hifn_setup_crypto_command(dev, buf_pos,
nbytes, nbytes, ctx->key, ctx->keysize,
rctx->iv, rctx->ivsize, md);
}
dev->sa[sa_idx] = priv;
dev->started++;
cmd_len = buf_pos - buf;
dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
dma->cmdr[dma->cmdi].l = __cpu_to_le32(
HIFN_D_VALID | HIFN_D_LAST |
HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
dma->cmdi = 0;
} else
dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
dev->flags |= HIFN_FLAG_CMD_BUSY;
}
return 0;
err_out:
return -EINVAL;
}
static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
unsigned int offset, unsigned int size, int last)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
idx = dma->srci;
dma->srcr[idx].p = __cpu_to_le32(addr);
dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
if (++idx == HIFN_D_SRC_RSIZE) {
dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
(last ? HIFN_D_LAST : 0));
idx = 0;
}
dma->srci = idx;
dma->srcu++;
if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
dev->flags |= HIFN_FLAG_SRC_BUSY;
}
return size;
}
static void hifn_setup_res_desc(struct hifn_device *dev)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
HIFN_D_VALID | HIFN_D_LAST);
/*
* dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
* HIFN_D_LAST);
*/
if (++dma->resi == HIFN_D_RES_RSIZE) {
dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
dma->resi = 0;
}
dma->resu++;
if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
dev->flags |= HIFN_FLAG_RES_BUSY;
}
}
static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
unsigned offset, unsigned size, int last)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int idx;
dma_addr_t addr;
addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
idx = dma->dsti;
dma->dstr[idx].p = __cpu_to_le32(addr);
dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
HIFN_D_MASKDONEIRQ | (last ? HIFN_D_LAST : 0));
if (++idx == HIFN_D_DST_RSIZE) {
dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
(last ? HIFN_D_LAST : 0));
idx = 0;
}
dma->dsti = idx;
dma->dstu++;
if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
dev->flags |= HIFN_FLAG_DST_BUSY;
}
}
static int hifn_setup_dma(struct hifn_device *dev,
struct hifn_context *ctx, struct hifn_request_context *rctx,
struct scatterlist *src, struct scatterlist *dst,
unsigned int nbytes, void *priv)
{
struct scatterlist *t;
struct page *spage, *dpage;
unsigned int soff, doff;
unsigned int n, len;
n = nbytes;
while (n) {
spage = sg_page(src);
soff = src->offset;
len = min(src->length, n);
hifn_setup_src_desc(dev, spage, soff, len, n - len == 0);
src++;
n -= len;
}
t = &rctx->walk.cache[0];
n = nbytes;
while (n) {
if (t->length && rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
BUG_ON(!sg_page(t));
dpage = sg_page(t);
doff = 0;
len = t->length;
} else {
BUG_ON(!sg_page(dst));
dpage = sg_page(dst);
doff = dst->offset;
len = dst->length;
}
len = min(len, n);
hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0);
dst++;
t++;
n -= len;
}
hifn_setup_cmd_desc(dev, ctx, rctx, priv, nbytes);
hifn_setup_res_desc(dev);
return 0;
}
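/*
 * Misaligned destinations are bounced through a small cache of freshly
 * allocated pages: hifn_cipher_walk_init() fills the cache (at most
 * ASYNC_SCATTERLIST_CACHE pages), hifn_cipher_walk() maps the request onto
 * it, and hifn_process_ready() later copies the data back into the real
 * destination scatterlist.
 */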
static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
int num, gfp_t gfp_flags)
{
int i;
num = min(ASYNC_SCATTERLIST_CACHE, num);
sg_init_table(w->cache, num);
w->num = 0;
for (i=0; i<num; ++i) {
struct page *page = alloc_page(gfp_flags);
struct scatterlist *s;
if (!page)
break;
s = &w->cache[i];
sg_set_page(s, page, PAGE_SIZE, 0);
w->num++;
}
return i;
}
static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
{
int i;
for (i=0; i<w->num; ++i) {
struct scatterlist *s = &w->cache[i];
__free_page(sg_page(s));
s->length = 0;
}
w->num = 0;
}
static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
unsigned int size, unsigned int *nbytesp)
{
unsigned int copy, drest = *drestp, nbytes = *nbytesp;
int idx = 0;
if (drest < size || size > nbytes)
return -EINVAL;
while (size) {
copy = min3(drest, size, dst->length);
size -= copy;
drest -= copy;
nbytes -= copy;
dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
__func__, copy, size, drest, nbytes);
dst++;
idx++;
}
*nbytesp = nbytes;
*drestp = drest;
return idx;
}
static int hifn_cipher_walk(struct ablkcipher_request *req,
struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
unsigned int nbytes = req->nbytes, offset, copy, diff;
int idx, tidx, err;
tidx = idx = 0;
offset = 0;
while (nbytes) {
if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
return -EINVAL;
dst = &req->dst[idx];
dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n",
__func__, dst->length, dst->offset, offset, nbytes);
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
!IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) ||
offset) {
unsigned slen = min(dst->length - offset, nbytes);
unsigned dlen = PAGE_SIZE;
t = &w->cache[idx];
err = ablkcipher_add(&dlen, dst, slen, &nbytes);
if (err < 0)
return err;
idx += err;
copy = slen & ~(HIFN_D_DST_DALIGN - 1);
diff = slen & (HIFN_D_DST_DALIGN - 1);
if (dlen < nbytes) {
/*
* The destination page does not have enough space
* for an additional blocksize-sized chunk, so we
* mark that page as containing only blocksize
* aligned chunks:
* t->length = (slen & ~(HIFN_D_DST_DALIGN - 1));
* and increase the number of bytes to be processed
* in the next chunk:
* nbytes += diff;
*/
nbytes += diff;
/*
* Temporary of course...
* Kick the author if you ever catch this one.
*/
printk(KERN_ERR "%s: dlen: %u, nbytes: %u, "
"slen: %u, offset: %u.\n",
__func__, dlen, nbytes, slen, offset);
printk(KERN_ERR "%s: please contact the author to fix this "
"issue; generally you should not hit "
"this path under any condition, but who "
"knows how you used the crypto code.\n"
"Thank you.\n", __func__);
BUG();
} else {
copy += diff + nbytes;
dst = &req->dst[idx];
err = ablkcipher_add(&dlen, dst, nbytes, &nbytes);
if (err < 0)
return err;
idx += err;
}
t->length = copy;
t->offset = offset;
} else {
nbytes -= min(dst->length, nbytes);
idx++;
}
tidx++;
}
return tidx;
}
static int hifn_setup_session(struct ablkcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
struct hifn_device *dev = ctx->dev;
unsigned long dlen, flags;
unsigned int nbytes = req->nbytes, idx = 0;
int err = -EINVAL, sg_num;
struct scatterlist *dst;
if (rctx->iv && !rctx->ivsize && rctx->mode != ACRYPTO_MODE_ECB)
goto err_out_exit;
rctx->walk.flags = 0;
while (nbytes) {
dst = &req->dst[idx];
dlen = min(dst->length, nbytes);
if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) ||
!IS_ALIGNED(dlen, HIFN_D_DST_DALIGN))
rctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
nbytes -= dlen;
idx++;
}
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
if (err < 0)
return err;
}
sg_num = hifn_cipher_walk(req, &rctx->walk);
if (sg_num < 0) {
err = sg_num;
goto err_out_exit;
}
spin_lock_irqsave(&dev->lock, flags);
if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
err = -EAGAIN;
goto err_out;
}
err = hifn_setup_dma(dev, ctx, rctx, req->src, req->dst, req->nbytes, req);
if (err)
goto err_out;
dev->snum++;
dev->active = HIFN_DEFAULT_ACTIVE_NUM;
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
err_out:
spin_unlock_irqrestore(&dev->lock, flags);
err_out_exit:
if (err) {
printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
"type: %u, err: %d.\n",
dev->name, rctx->iv, rctx->ivsize,
ctx->key, ctx->keysize,
rctx->mode, rctx->op, rctx->type, err);
}
return err;
}
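/*
 * Self-test: run a single AES-128 ECB operation on an all-zero block with
 * an all-zero key and compare the result against the well-known FIPS
 * vector 66 e9 4b d4 ... 2b 2e.
 */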
static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
{
int n, err;
u8 src[16];
struct hifn_context ctx;
struct hifn_request_context rctx;
u8 fips_aes_ecb_from_zero[16] = {
0x66, 0xE9, 0x4B, 0xD4,
0xEF, 0x8A, 0x2C, 0x3B,
0x88, 0x4C, 0xFA, 0x59,
0xCA, 0x34, 0x2B, 0x2E};
struct scatterlist sg;
memset(src, 0, sizeof(src));
memset(ctx.key, 0, sizeof(ctx.key));
ctx.dev = dev;
ctx.keysize = 16;
rctx.ivsize = 0;
rctx.iv = NULL;
rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
rctx.mode = ACRYPTO_MODE_ECB;
rctx.type = ACRYPTO_TYPE_AES_128;
rctx.walk.cache[0].length = 0;
sg_init_one(&sg, &src, sizeof(src));
err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL);
if (err)
goto err_out;
dev->started = 0;
msleep(200);
dprintk("%s: decoded: ", dev->name);
for (n=0; n<sizeof(src); ++n)
dprintk("%02x ", src[n]);
dprintk("\n");
dprintk("%s: FIPS : ", dev->name);
for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
dprintk("%02x ", fips_aes_ecb_from_zero[n]);
dprintk("\n");
if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
"passed.\n", dev->name);
return 0;
}
err_out:
printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name);
return -1;
}
static int hifn_start_device(struct hifn_device *dev)
{
int err;
dev->started = dev->active = 0;
hifn_reset_dma(dev, 1);
err = hifn_enable_crypto(dev);
if (err)
return err;
hifn_reset_puc(dev);
hifn_init_dma(dev);
hifn_init_registers(dev);
hifn_init_pubrng(dev);
return 0;
}
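/*
 * Copy 'size' bytes from the linear buffer at 'saddr' into the destination
 * scatterlist, mapping each page with kmap_atomic(). Used when completing
 * requests that went through the misaligned-destination bounce pages.
 */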
static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
{
unsigned int srest = *srestp, nbytes = *nbytesp, copy;
void *daddr;
int idx = 0;
if (srest < size || size > nbytes)
return -EINVAL;
while (size) {
copy = min3(srest, dst->length, size);
daddr = kmap_atomic(sg_page(dst));
memcpy(daddr + dst->offset + offset, saddr, copy);
kunmap_atomic(daddr);
nbytes -= copy;
size -= copy;
srest -= copy;
saddr += copy;
offset = 0;
dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
__func__, copy, size, srest, nbytes);
dst++;
idx++;
}
*nbytesp = nbytes;
*srestp = srest;
return idx;
}
static inline void hifn_complete_sa(struct hifn_device *dev, int i)
{
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
dev->sa[i] = NULL;
dev->started--;
if (dev->started < 0)
printk("%s: started: %d.\n", __func__, dev->started);
spin_unlock_irqrestore(&dev->lock, flags);
BUG_ON(dev->started < 0);
}
static void hifn_process_ready(struct ablkcipher_request *req, int error)
{
struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
unsigned int nbytes = req->nbytes;
int idx = 0, err;
struct scatterlist *dst, *t;
void *saddr;
while (nbytes) {
t = &rctx->walk.cache[idx];
dst = &req->dst[idx];
dprintk("\n%s: sg_page(t): %p, t->length: %u, "
"sg_page(dst): %p, dst->length: %u, "
"nbytes: %u.\n",
__func__, sg_page(t), t->length,
sg_page(dst), dst->length, nbytes);
if (!t->length) {
nbytes -= min(dst->length, nbytes);
idx++;
continue;
}
saddr = kmap_atomic(sg_page(t));
err = ablkcipher_get(saddr, &t->length, t->offset,
dst, nbytes, &nbytes);
if (err < 0) {
kunmap_atomic(saddr);
break;
}
idx += err;
kunmap_atomic(saddr);
}
hifn_cipher_walk_exit(&rctx->walk);
}
req->base.complete(&req->base, error);
}
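/*
 * Walk each descriptor ring from its cleanup index (*k) until a descriptor
 * still owned by the hardware (HIFN_D_VALID set) is found, completing the
 * requests recorded in dev->sa[] for finished result descriptors.
 */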
static void hifn_clear_rings(struct hifn_device *dev, int error)
{
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int i, u;
dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
"k: %d.%d.%d.%d.\n",
dev->name,
dma->cmdi, dma->srci, dma->dsti, dma->resi,
dma->cmdu, dma->srcu, dma->dstu, dma->resu,
dma->cmdk, dma->srck, dma->dstk, dma->resk);
i = dma->resk; u = dma->resu;
while (u != 0) {
if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
break;
if (dev->sa[i]) {
dev->success++;
dev->reset = 0;
hifn_process_ready(dev->sa[i], error);
hifn_complete_sa(dev, i);
}
if (++i == HIFN_D_RES_RSIZE)
i = 0;
u--;
}
dma->resk = i; dma->resu = u;
i = dma->srck; u = dma->srcu;
while (u != 0) {
if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
break;
if (++i == HIFN_D_SRC_RSIZE)
i = 0;
u--;
}
dma->srck = i; dma->srcu = u;
i = dma->cmdk; u = dma->cmdu;
while (u != 0) {
if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
break;
if (++i == HIFN_D_CMD_RSIZE)
i = 0;
u--;
}
dma->cmdk = i; dma->cmdu = u;
i = dma->dstk; u = dma->dstu;
while (u != 0) {
if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
break;
if (++i == HIFN_D_DST_RSIZE)
i = 0;
u--;
}
dma->dstk = i; dma->dstu = u;
dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
"k: %d.%d.%d.%d.\n",
dev->name,
dma->cmdi, dma->srci, dma->dsti, dma->resi,
dma->cmdu, dma->srcu, dma->dstu, dma->resu,
dma->cmdk, dma->srck, dma->dstk, dma->resk);
}
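/*
 * Watchdog, rescheduled every second: once the device has been idle for
 * HIFN_DEFAULT_ACTIVE_NUM ticks it disables DMA engines whose rings are
 * empty, and if outstanding requests make no progress for five consecutive
 * ticks it fails them with -ENODEV and restarts the device.
 */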
static void hifn_work(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct hifn_device *dev = container_of(dw, struct hifn_device, work);
unsigned long flags;
int reset = 0;
u32 r = 0;
spin_lock_irqsave(&dev->lock, flags);
if (dev->active == 0) {
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
dev->flags &= ~HIFN_FLAG_CMD_BUSY;
r |= HIFN_DMACSR_C_CTRL_DIS;
}
if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
dev->flags &= ~HIFN_FLAG_SRC_BUSY;
r |= HIFN_DMACSR_S_CTRL_DIS;
}
if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
dev->flags &= ~HIFN_FLAG_DST_BUSY;
r |= HIFN_DMACSR_D_CTRL_DIS;
}
if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
dev->flags &= ~HIFN_FLAG_RES_BUSY;
r |= HIFN_DMACSR_R_CTRL_DIS;
}
if (r)
hifn_write_1(dev, HIFN_1_DMA_CSR, r);
} else
dev->active--;
if ((dev->prev_success == dev->success) && dev->started)
reset = 1;
dev->prev_success = dev->success;
spin_unlock_irqrestore(&dev->lock, flags);
if (reset) {
if (++dev->reset >= 5) {
int i;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
printk("%s: r: %08x, active: %d, started: %d, "
"success: %lu: qlen: %u/%u, reset: %d.\n",
dev->name, r, dev->active, dev->started,
dev->success, dev->queue.qlen, dev->queue.max_qlen,
reset);
printk("%s: res: ", __func__);
for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
printk("%x.%p ", dma->resr[i].l, dev->sa[i]);
if (dev->sa[i]) {
hifn_process_ready(dev->sa[i], -ENODEV);
hifn_complete_sa(dev, i);
}
}
printk("\n");
hifn_reset_dma(dev, 1);
hifn_stop_device(dev);
hifn_start_device(dev);
dev->reset = 0;
}
tasklet_schedule(&dev->tasklet);
}
schedule_delayed_work(&dev->work, HZ);
}
static irqreturn_t hifn_interrupt(int irq, void *data)
{
struct hifn_device *dev = (struct hifn_device *)data;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
u32 dmacsr, restart;
dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
dma->cmdi, dma->srci, dma->dsti, dma->resi,
dma->cmdu, dma->srcu, dma->dstu, dma->resu);
if ((dmacsr & dev->dmareg) == 0)
return IRQ_NONE;
hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
if (dmacsr & HIFN_DMACSR_ENGINE)
hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
if (dmacsr & HIFN_DMACSR_PUBDONE)
hifn_write_1(dev, HIFN_1_PUB_STATUS,
hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
if (restart) {
u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
!!(dmacsr & HIFN_DMACSR_D_OVER),
puisr, !!(puisr & HIFN_PUISR_DSTOVER));
if (!!(puisr & HIFN_PUISR_DSTOVER))
hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
HIFN_DMACSR_D_OVER));
}
restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
if (restart) {
printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
!!(dmacsr & HIFN_DMACSR_S_ABORT),
!!(dmacsr & HIFN_DMACSR_D_ABORT),
!!(dmacsr & HIFN_DMACSR_R_ABORT));
hifn_reset_dma(dev, 1);
hifn_init_dma(dev);
hifn_init_registers(dev);
}
if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
dprintk("%s: wait on command.\n", dev->name);
dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
}
tasklet_schedule(&dev->tasklet);
return IRQ_HANDLED;
}
static void hifn_flush(struct hifn_device *dev)
{
unsigned long flags;
struct crypto_async_request *async_req;
struct ablkcipher_request *req;
struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
int i;
for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
struct hifn_desc *d = &dma->resr[i];
if (dev->sa[i]) {
hifn_process_ready(dev->sa[i],
(d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0);
hifn_complete_sa(dev, i);
}
}
spin_lock_irqsave(&dev->lock, flags);
while ((async_req = crypto_dequeue_request(&dev->queue))) {
req = container_of(async_req, struct ablkcipher_request, base);
spin_unlock_irqrestore(&dev->lock, flags);
hifn_process_ready(req, -ENODEV);
spin_lock_irqsave(&dev->lock, flags);
}
spin_unlock_irqrestore(&dev->lock, flags);
}
static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int len)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct hifn_context *ctx = crypto_tfm_ctx(tfm);
struct hifn_device *dev = ctx->dev;
if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
if (len == HIFN_DES_KEY_LENGTH) {
u32 tmp[DES_EXPKEY_WORDS];
int ret = des_ekey(tmp, key);
if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
}
dev->flags &= ~HIFN_FLAG_OLD_KEY;
memcpy(ctx->key, key, len);
ctx->keysize = len;
return 0;
}
static int hifn_handle_req(struct ablkcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_device *dev = ctx->dev;
int err = -EAGAIN;
if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
err = hifn_setup_session(req);
if (err == -EAGAIN) {
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
err = ablkcipher_enqueue_request(&dev->queue, req);
spin_unlock_irqrestore(&dev->lock, flags);
}
return err;
}
static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
u8 type, u8 mode)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_request_context *rctx = ablkcipher_request_ctx(req);
unsigned ivsize;
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
if (req->info && mode != ACRYPTO_MODE_ECB) {
if (type == ACRYPTO_TYPE_AES_128)
ivsize = HIFN_AES_IV_LENGTH;
else if (type == ACRYPTO_TYPE_DES)
ivsize = HIFN_DES_KEY_LENGTH;
else if (type == ACRYPTO_TYPE_3DES)
ivsize = HIFN_3DES_KEY_LENGTH;
}
if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
if (ctx->keysize == 24)
type = ACRYPTO_TYPE_AES_192;
else if (ctx->keysize == 32)
type = ACRYPTO_TYPE_AES_256;
}
rctx->op = op;
rctx->mode = mode;
rctx->type = type;
rctx->iv = req->info;
rctx->ivsize = ivsize;
/*
* HEAVY TODO: needs to kick Herbert XU to write documentation.
* HEAVY TODO: needs to kick Herbert XU to write documentation.
* HEAVY TODO: needs to kick Herbert XU to write documentation.
*/
return hifn_handle_req(req);
}
static int hifn_process_queue(struct hifn_device *dev)
{
struct crypto_async_request *async_req, *backlog;
struct ablkcipher_request *req;
unsigned long flags;
int err = 0;
while (dev->started < HIFN_QUEUE_LENGTH) {
spin_lock_irqsave(&dev->lock, flags);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
spin_unlock_irqrestore(&dev->lock, flags);
if (!async_req)
break;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
req = container_of(async_req, struct ablkcipher_request, base);
err = hifn_handle_req(req);
if (err)
break;
}
return err;
}
static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
u8 type, u8 mode)
{
int err;
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
struct hifn_device *dev = ctx->dev;
err = hifn_setup_crypto_req(req, op, type, mode);
if (err)
return err;
if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
hifn_process_queue(dev);
return -EINPROGRESS;
}
/*
* AES encryption functions.
*/
static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}
/*
* AES decryption functions.
*/
static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
}
/*
* DES encryption functions.
*/
static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}
/*
* DES decryption functions.
*/
static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
}
/*
* 3DES encryption functions.
*/
static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}
/*
* 3DES decryption functions.
*/
static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
}
static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
}
static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
{
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
}
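/*
 * The algorithm templates below are instantiated per device by
 * hifn_register_alg()/hifn_alg_alloc(); the resulting driver name is
 * "<drv_name>-<device>", e.g. "cbc-aes-hifn0" for the first card.
 */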
struct hifn_alg_template
{
char name[CRYPTO_MAX_ALG_NAME];
char drv_name[CRYPTO_MAX_ALG_NAME];
unsigned int bsize;
struct ablkcipher_alg ablkcipher;
};
static struct hifn_alg_template hifn_alg_templates[] = {
/*
* 3DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_3des_cfb,
.decrypt = hifn_decrypt_3des_cfb,
},
},
{
.name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_3des_ofb,
.decrypt = hifn_decrypt_3des_ofb,
},
},
{
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
.ablkcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_3des_cbc,
.decrypt = hifn_decrypt_3des_cbc,
},
},
{
.name = "ecb(des3_ede)", .drv_name = "ecb-3des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_3des_ecb,
.decrypt = hifn_decrypt_3des_ecb,
},
},
/*
* DES ECB, CBC, CFB and OFB modes.
*/
{
.name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_des_cfb,
.decrypt = hifn_decrypt_des_cfb,
},
},
{
.name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_des_ofb,
.decrypt = hifn_decrypt_des_ofb,
},
},
{
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
.ablkcipher = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_des_cbc,
.decrypt = hifn_decrypt_des_cbc,
},
},
{
.name = "ecb(des)", .drv_name = "ecb-des", .bsize = 8,
.ablkcipher = {
.min_keysize = HIFN_DES_KEY_LENGTH,
.max_keysize = HIFN_DES_KEY_LENGTH,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_des_ecb,
.decrypt = hifn_decrypt_des_ecb,
},
},
/*
* AES ECB, CBC, CFB and OFB modes.
*/
{
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_aes_ecb,
.decrypt = hifn_decrypt_aes_ecb,
},
},
{
.name = "cbc(aes)", .drv_name = "cbc-aes", .bsize = 16,
.ablkcipher = {
.ivsize = HIFN_AES_IV_LENGTH,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_aes_cbc,
.decrypt = hifn_decrypt_aes_cbc,
},
},
{
.name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_aes_cfb,
.decrypt = hifn_decrypt_aes_cfb,
},
},
{
.name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = hifn_setkey,
.encrypt = hifn_encrypt_aes_ofb,
.decrypt = hifn_decrypt_aes_ofb,
},
},
};
static int hifn_cra_init(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
struct hifn_context *ctx = crypto_tfm_ctx(tfm);
ctx->dev = ha->dev;
tfm->crt_ablkcipher.reqsize = sizeof(struct hifn_request_context);
return 0;
}
static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
{
struct hifn_crypto_alg *alg;
int err;
alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
if (!alg)
return -ENOMEM;
snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
t->drv_name, dev->name);
alg->alg.cra_priority = 300;
alg->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
alg->alg.cra_blocksize = t->bsize;
alg->alg.cra_ctxsize = sizeof(struct hifn_context);
alg->alg.cra_alignmask = 0;
alg->alg.cra_type = &crypto_ablkcipher_type;
alg->alg.cra_module = THIS_MODULE;
alg->alg.cra_u.ablkcipher = t->ablkcipher;
alg->alg.cra_init = hifn_cra_init;
alg->dev = dev;
list_add_tail(&alg->entry, &dev->alg_list);
err = crypto_register_alg(&alg->alg);
if (err) {
list_del(&alg->entry);
kfree(alg);
}
return err;
}
static void hifn_unregister_alg(struct hifn_device *dev)
{
struct hifn_crypto_alg *a, *n;
list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
list_del(&a->entry);
crypto_unregister_alg(&a->alg);
kfree(a);
}
}
static int hifn_register_alg(struct hifn_device *dev)
{
int i, err;
for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
if (err)
goto err_out_exit;
}
return 0;
err_out_exit:
hifn_unregister_alg(dev);
return err;
}
static void hifn_tasklet_callback(unsigned long data)
{
struct hifn_device *dev = (struct hifn_device *)data;
/*
* It is ok to call this without the lock held, although it
* modifies some fields that are also used in parallel (like
* dev->success); they are only touched from process context
* or their update is atomic (like setting dev->sa[i] to NULL).
*/
hifn_clear_rings(dev, 0);
if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
hifn_process_queue(dev);
}
static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err, i;
struct hifn_device *dev;
char name[8];
err = pci_enable_device(pdev);
if (err)
return err;
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err)
goto err_out_disable_pci_device;
snprintf(name, sizeof(name), "hifn%d",
atomic_inc_return(&hifn_dev_number)-1);
err = pci_request_regions(pdev, name);
if (err)
goto err_out_disable_pci_device;
if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
dprintk("%s: Broken hardware - I/O regions are too small.\n",
pci_name(pdev));
err = -ENODEV;
goto err_out_free_regions;
}
dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
GFP_KERNEL);
if (!dev) {
err = -ENOMEM;
goto err_out_free_regions;
}
INIT_LIST_HEAD(&dev->alg_list);
snprintf(dev->name, sizeof(dev->name), "%s", name);
spin_lock_init(&dev->lock);
for (i=0; i<3; ++i) {
unsigned long addr, size;
addr = pci_resource_start(pdev, i);
size = pci_resource_len(pdev, i);
dev->bar[i] = ioremap_nocache(addr, size);
if (!dev->bar[i])
goto err_out_unmap_bars;
}
dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
&dev->desc_dma);
if (!dev->desc_virt) {
dprintk("Failed to allocate descriptor rings.\n");
goto err_out_unmap_bars;
}
memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
dev->pdev = pdev;
dev->irq = pdev->irq;
for (i=0; i<HIFN_D_RES_RSIZE; ++i)
dev->sa[i] = NULL;
pci_set_drvdata(pdev, dev);
tasklet_init(&dev->tasklet, hifn_tasklet_callback, (unsigned long)dev);
crypto_init_queue(&dev->queue, 1);
err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
if (err) {
dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
dev->irq = 0;
goto err_out_free_desc;
}
err = hifn_start_device(dev);
if (err)
goto err_out_free_irq;
err = hifn_test(dev, 1, 0);
if (err)
goto err_out_stop_device;
err = hifn_register_rng(dev);
if (err)
goto err_out_stop_device;
err = hifn_register_alg(dev);
if (err)
goto err_out_unregister_rng;
INIT_DELAYED_WORK(&dev->work, hifn_work);
schedule_delayed_work(&dev->work, HZ);
dprintk("HIFN crypto accelerator card at %s has been "
"successfully registered as %s.\n",
pci_name(pdev), dev->name);
return 0;
err_out_unregister_rng:
hifn_unregister_rng(dev);
err_out_stop_device:
hifn_reset_dma(dev, 1);
hifn_stop_device(dev);
err_out_free_irq:
free_irq(dev->irq, dev);
tasklet_kill(&dev->tasklet);
err_out_free_desc:
pci_free_consistent(pdev, sizeof(struct hifn_dma),
dev->desc_virt, dev->desc_dma);
err_out_unmap_bars:
for (i=0; i<3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pci_device:
pci_disable_device(pdev);
return err;
}
static void __devexit hifn_remove(struct pci_dev *pdev)
{
int i;
struct hifn_device *dev;
dev = pci_get_drvdata(pdev);
if (dev) {
cancel_delayed_work_sync(&dev->work);
hifn_unregister_rng(dev);
hifn_unregister_alg(dev);
hifn_reset_dma(dev, 1);
hifn_stop_device(dev);
free_irq(dev->irq, dev);
tasklet_kill(&dev->tasklet);
hifn_flush(dev);
pci_free_consistent(pdev, sizeof(struct hifn_dma),
dev->desc_virt, dev->desc_dma);
for (i=0; i<3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
kfree(dev);
}
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static struct pci_device_id hifn_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
{ PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
static struct pci_driver hifn_pci_driver = {
.name = "hifn795x",
.id_table = hifn_pci_tbl,
.probe = hifn_probe,
.remove = __devexit_p(hifn_remove),
};
static int __init hifn_init(void)
{
unsigned int freq;
int err;
/* HIFN supports only 32-bit addresses */
BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
if (strncmp(hifn_pll_ref, "ext", 3) &&
strncmp(hifn_pll_ref, "pci", 3)) {
printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, "
"must be pci or ext");
return -EINVAL;
}
/*
* For the 7955/7956 the reference clock frequency must be in the
* range of 20MHz-100MHz. For the 7954 the upper bound is 66.67MHz,
* but this chip is currently not supported.
*/
if (hifn_pll_ref[3] != '\0') {
freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
if (freq < 20 || freq > 100) {
printk(KERN_ERR "hifn795x: invalid hifn_pll_ref "
"frequency, must be in the range "
"of 20-100");
return -EINVAL;
}
}
err = pci_register_driver(&hifn_pci_driver);
if (err < 0) {
dprintk("Failed to register PCI driver for %s device.\n",
hifn_pci_driver.name);
return -ENODEV;
}
printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
"has been successfully registered.\n");
return 0;
}
static void __exit hifn_fini(void)
{
pci_unregister_driver(&hifn_pci_driver);
printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
"has been successfully unregistered.\n");
}
module_init(hifn_init);
module_exit(hifn_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");
| gpl-2.0 |
TaichiN/android_kernel_lge_hammerhead | arch/powerpc/oprofile/op_model_rs64.c | 6922 | 4504 | /*
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
#define dbg(args...)
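/*
 * Program the event selector for performance counter 'i': the PMC1 and
 * PMC2 selector fields live in MMCR0 (shifts 6 and 0), PMC3-PMC8 in MMCR1,
 * so pick the right register, shift and mask and do a read-modify-write.
 */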
static void ctrl_write(unsigned int i, unsigned int val)
{
unsigned int tmp = 0;
unsigned long shift = 0, mask = 0;
dbg("ctrl_write %d %x\n", i, val);
switch(i) {
case 0:
tmp = mfspr(SPRN_MMCR0);
shift = 6;
mask = 0x7F;
break;
case 1:
tmp = mfspr(SPRN_MMCR0);
shift = 0;
mask = 0x3F;
break;
case 2:
tmp = mfspr(SPRN_MMCR1);
shift = 31 - 4;
mask = 0x1F;
break;
case 3:
tmp = mfspr(SPRN_MMCR1);
shift = 31 - 9;
mask = 0x1F;
break;
case 4:
tmp = mfspr(SPRN_MMCR1);
shift = 31 - 14;
mask = 0x1F;
break;
case 5:
tmp = mfspr(SPRN_MMCR1);
shift = 31 - 19;
mask = 0x1F;
break;
case 6:
tmp = mfspr(SPRN_MMCR1);
shift = 31 - 24;
mask = 0x1F;
break;
case 7:
tmp = mfspr(SPRN_MMCR1);
shift = 31 - 28;
mask = 0xF;
break;
}
tmp = tmp & ~(mask << shift);
tmp |= val << shift;
switch(i) {
case 0:
case 1:
mtspr(SPRN_MMCR0, tmp);
break;
default:
mtspr(SPRN_MMCR1, tmp);
}
dbg("ctrl_write mmcr0 %lx mmcr1 %lx\n", mfspr(SPRN_MMCR0),
mfspr(SPRN_MMCR1));
}
static unsigned long reset_value[OP_MAX_COUNTER];
static int num_counters;
static int rs64_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int i;
num_counters = num_ctrs;
for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
/* XXX setup user and kernel profiling */
return 0;
}
static int rs64_cpu_setup(struct op_counter_config *ctr)
{
unsigned int mmcr0;
/* reset MMCR0 and set the freeze bit */
mmcr0 = MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
/* reset MMCR1, MMCRA */
mtspr(SPRN_MMCR1, 0);
if (cpu_has_feature(CPU_FTR_MMCRA))
mtspr(SPRN_MMCRA, 0);
mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
/* Only applies to POWER3, but should be safe on RS64 */
mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
mtspr(SPRN_MMCR0, mmcr0);
dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
mfspr(SPRN_MMCR0));
dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
mfspr(SPRN_MMCR1));
return 0;
}
static int rs64_start(struct op_counter_config *ctr)
{
int i;
unsigned int mmcr0;
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
if (ctr[i].enabled) {
classic_ctr_write(i, reset_value[i]);
ctrl_write(i, ctr[i].event);
} else {
classic_ctr_write(i, 0);
}
}
mmcr0 = mfspr(SPRN_MMCR0);
/*
* now clear the freeze bit, counting will not start until we
* rfid from this exception, because only at that point will
* the PMM bit be cleared
*/
mmcr0 &= ~MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
return 0;
}
static void rs64_stop(void)
{
unsigned int mmcr0;
/* freeze counters */
mmcr0 = mfspr(SPRN_MMCR0);
mmcr0 |= MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
mb();
}
static void rs64_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned int mmcr0;
int is_kernel;
int val;
int i;
unsigned long pc = mfspr(SPRN_SIAR);
is_kernel = is_kernel_addr(pc);
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
val = classic_ctr_read(i);
if (val < 0) {
if (ctr[i].enabled) {
oprofile_add_ext_sample(pc, regs, i, is_kernel);
classic_ctr_write(i, reset_value[i]);
} else {
classic_ctr_write(i, 0);
}
}
}
mmcr0 = mfspr(SPRN_MMCR0);
/* reset the perfmon trigger */
mmcr0 |= MMCR0_PMXE;
/*
* now clear the freeze bit, counting will not start until we
* rfid from this exception, because only at that point will
* the PMM bit be cleared
*/
mmcr0 &= ~MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
}
struct op_powerpc_model op_model_rs64 = {
.reg_setup = rs64_reg_setup,
.cpu_setup = rs64_cpu_setup,
.start = rs64_start,
.stop = rs64_stop,
.handle_interrupt = rs64_handle_interrupt,
};
| gpl-2.0 |
zales/RamosW17pro-kernel-common | drivers/net/phy/lxt.c | 8202 | 5309 | /*
* drivers/net/phy/lxt.c
*
* Driver for Intel LXT PHYs
*
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/* The Level one LXT970 is used by many boards */
#define MII_LXT970_IER 17 /* Interrupt Enable Register */
#define MII_LXT970_IER_IEN 0x0002
#define MII_LXT970_ISR 18 /* Interrupt Status Register */
#define MII_LXT970_CONFIG 19 /* Configuration Register */
/* ------------------------------------------------------------------------- */
/* The Level one LXT971 is used on some of my custom boards */
/* register definitions for the 971 */
#define MII_LXT971_IER 18 /* Interrupt Enable Register */
#define MII_LXT971_IER_IEN 0x00f2
#define MII_LXT971_ISR 19 /* Interrupt Status Register */
/* register definitions for the 973 */
#define MII_LXT973_PCR 16 /* Port Configuration Register */
#define PCR_FIBER_SELECT 1
MODULE_DESCRIPTION("Intel LXT PHY driver");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
static int lxt970_ack_interrupt(struct phy_device *phydev)
{
int err;
err = phy_read(phydev, MII_BMSR);
if (err < 0)
return err;
err = phy_read(phydev, MII_LXT970_ISR);
if (err < 0)
return err;
return 0;
}
static int lxt970_config_intr(struct phy_device *phydev)
{
int err;
if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
else
err = phy_write(phydev, MII_LXT970_IER, 0);
return err;
}
static int lxt970_config_init(struct phy_device *phydev)
{
int err;
err = phy_write(phydev, MII_LXT970_CONFIG, 0);
return err;
}
static int lxt971_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_LXT971_ISR);
if (err < 0)
return err;
return 0;
}
static int lxt971_config_intr(struct phy_device *phydev)
{
int err;
if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
else
err = phy_write(phydev, MII_LXT971_IER, 0);
return err;
}
static int lxt973_probe(struct phy_device *phydev)
{
int val = phy_read(phydev, MII_LXT973_PCR);
if (val & PCR_FIBER_SELECT) {
/*
* If fiber is selected, then the only correct setting
* is 100Mbps, full duplex, and auto negotiation off.
*/
val = phy_read(phydev, MII_BMCR);
val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
val &= ~BMCR_ANENABLE;
phy_write(phydev, MII_BMCR, val);
/* Remember that the port is in fiber mode. */
phydev->priv = lxt973_probe;
} else {
phydev->priv = NULL;
}
return 0;
}
static int lxt973_config_aneg(struct phy_device *phydev)
{
/* Do nothing if port is in fiber mode. */
return phydev->priv ? 0 : genphy_config_aneg(phydev);
}
static struct phy_driver lxt970_driver = {
.phy_id = 0x78100000,
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = lxt970_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
.driver = { .owner = THIS_MODULE,},
};
static struct phy_driver lxt971_driver = {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
.driver = { .owner = THIS_MODULE,},
};
static struct phy_driver lxt973_driver = {
.phy_id = 0x00137a10,
.name = "LXT973",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
.read_status = genphy_read_status,
.driver = { .owner = THIS_MODULE,},
};
static int __init lxt_init(void)
{
int ret;
ret = phy_driver_register(&lxt970_driver);
if (ret)
goto err1;
ret = phy_driver_register(&lxt971_driver);
if (ret)
goto err2;
ret = phy_driver_register(&lxt973_driver);
if (ret)
goto err3;
return 0;
err3:
phy_driver_unregister(&lxt971_driver);
err2:
phy_driver_unregister(&lxt970_driver);
err1:
return ret;
}
static void __exit lxt_exit(void)
{
phy_driver_unregister(&lxt970_driver);
phy_driver_unregister(&lxt971_driver);
phy_driver_unregister(&lxt973_driver);
}
module_init(lxt_init);
module_exit(lxt_exit);
static struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
{ 0x001378e0, 0xfffffff0 },
{ 0x00137a10, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, lxt_tbl);
| gpl-2.0 |
mmihail/trinitycore10353 | dep/acelite/ace/UUID.cpp | 11 | 15905 | //$Id: UUID.cpp 88515 2010-01-13 08:47:38Z johnnyw $
#include "ace/UUID.h"
#include "ace/Guard_T.h"
#if !defined (__ACE_INLINE__)
#include "ace/UUID.inl"
#endif /* __ACE_INLINE__ */
#include "ace/Log_Msg.h"
#include "ace/OS_NS_stdio.h"
#include "ace/OS_NS_string.h"
#include "ace/OS_NS_sys_time.h"
#include "ace/OS_NS_netdb.h"
#include "ace/OS_NS_unistd.h"
#include "ace/ACE.h"
ACE_RCSID (ace,
UUID,
"$Id: UUID.cpp 88515 2010-01-13 08:47:38Z johnnyw $")
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
namespace ACE_Utils
{
// NIL version of the UUID
const UUID UUID::NIL_UUID;
UUID::UUID (const ACE_CString& uuid_string)
{
this->init ();
this->from_string_i (uuid_string);
}
const UUID &
UUID::operator = (const UUID & rhs)
{
if (this != &rhs)
{
// Reset the string version of the UUID if a string version
// exists and the UUID is not equal to the old UUID.
if (0 != this->as_string_.get ())
{
if (0 == rhs.as_string_.get () || *this != rhs)
this->as_string_.reset ();
}
// Copy the contents of the UUID.
ACE_OS::memcpy (&this->uuid_, &rhs.uuid_, BINARY_SIZE);
/// @todo We should create an UUID_Ex class for UUIDs that
/// contain the thread id and process id.
this->thr_id_ = rhs.thr_id_;
this->pid_ = rhs.pid_;
}
return *this;
}
const ACE_CString * UUID::to_string (void) const
{
// Compute the string representation only once.
if (0 != this->as_string_.get ())
return this->as_string_.get ();
// Get a buffer exactly the correct size. Use the nil UUID as a
// gauge. Don't forget the trailing nul.
ACE_Auto_Array_Ptr <char> auto_clean;
size_t UUID_STRING_LENGTH = 36 + thr_id_.length () + pid_.length ();
char *buf = 0;
if (36 == UUID_STRING_LENGTH)
{
ACE_NEW_RETURN (buf,
char[UUID_STRING_LENGTH + 1],
0);
// Let the auto array pointer manage the buffer.
auto_clean.reset (buf);
ACE_OS::sprintf (buf,
"%8.8x-%4.4x-%4.4x-%2.2x%2.2x-%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x",
this->uuid_.time_low_,
this->uuid_.time_mid_,
this->uuid_.time_hi_and_version_,
this->uuid_.clock_seq_hi_and_reserved_,
this->uuid_.clock_seq_low_,
(this->uuid_.node_.node_ID ()) [0],
(this->uuid_.node_.node_ID ()) [1],
(this->uuid_.node_.node_ID ()) [2],
(this->uuid_.node_.node_ID ()) [3],
(this->uuid_.node_.node_ID ()) [4],
(this->uuid_.node_.node_ID ()) [5]);
}
else
{
UUID_STRING_LENGTH += 2; //for '-'
ACE_NEW_RETURN (buf,
char[UUID_STRING_LENGTH + 1],
0);
// Let the auto array pointer manage the buffer.
auto_clean.reset (buf);
ACE_OS::sprintf (buf,
"%8.8x-%4.4x-%4.4x-%2.2x%2.2x-%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x-%s-%s",
this->uuid_.time_low_,
this->uuid_.time_mid_,
this->uuid_.time_hi_and_version_,
this->uuid_.clock_seq_hi_and_reserved_,
this->uuid_.clock_seq_low_,
(this->uuid_.node_.node_ID ()) [0],
(this->uuid_.node_.node_ID ()) [1],
(this->uuid_.node_.node_ID ()) [2],
(this->uuid_.node_.node_ID ()) [3],
(this->uuid_.node_.node_ID ()) [4],
(this->uuid_.node_.node_ID ()) [5],
thr_id_.c_str (),
pid_.c_str ());
}
// Save the string.
ACE_CString * as_string = 0;
ACE_NEW_RETURN (as_string,
ACE_CString (buf, UUID_STRING_LENGTH),
0);
this->as_string_.reset (as_string);
return this->as_string_.get ();
}
void
UUID::from_string_i (const ACE_CString& uuid_string)
{
if (uuid_string.length () < NIL_UUID.to_string ()->length ())
{
ACE_ERROR ((LM_ERROR,
"%N ACE_UUID::from_string_i - "
"IllegalArgument (incorrect string length)\n"));
return;
}
/// Special case for the nil UUID.
if (uuid_string == *NIL_UUID.to_string ())
{
*this = NIL_UUID;
return;
}
unsigned int time_low;
unsigned int time_mid;
unsigned int time_hi_and_version;
unsigned int clock_seq_hi_and_reserved;
unsigned int clock_seq_low;
unsigned int node [UUID_Node::NODE_ID_SIZE];
char thr_pid_buf [BUFSIZ];
if (uuid_string.length () == NIL_UUID.to_string ()->length ())
{
// This might seem quite strange, this being in ACE, but it
// seems to be a bit difficult to write a facade for ::sscanf
// because some compilers don't support vsscanf, including
// MSVC. It appears that most platforms support sscanf though,
// so we need to use it directly.
const int nScanned =
#if defined (ACE_HAS_TR24731_2005_CRT)
sscanf_s (
#else
::sscanf (
#endif /* ACE_HAS_TR24731_2005_CRT */
uuid_string.c_str (),
"%8x-%4x-%4x-%2x%2x-%2x%2x%2x%2x%2x%2x",
&time_low,
&time_mid,
&time_hi_and_version,
&clock_seq_hi_and_reserved,
&clock_seq_low,
&node[0],
&node[1],
&node[2],
&node[3],
&node[4],
&node[5]
);
if (nScanned != 11)
{
ACE_DEBUG ((LM_DEBUG,
"UUID::from_string_i - "
"IllegalArgument (invalid string representation)\n"));
return;
}
}
else
{
const int nScanned =
#if defined (ACE_HAS_TR24731_2005_CRT)
sscanf_s (uuid_string.c_str (),
"%8x-%4x-%4x-%2x%2x-%2x%2x%2x%2x%2x%2x-%s",
&time_low,
&time_mid,
&time_hi_and_version,
&clock_seq_hi_and_reserved,
&clock_seq_low,
&node[0],
&node[1],
&node[2],
&node[3],
&node[4],
&node[5],
thr_pid_buf,
BUFSIZ
);
#else
::sscanf (uuid_string.c_str (),
"%8x-%4x-%4x-%2x%2x-%2x%2x%2x%2x%2x%2x-%s",
&time_low,
&time_mid,
&time_hi_and_version,
&clock_seq_hi_and_reserved,
&clock_seq_low,
&node[0],
&node[1],
&node[2],
&node[3],
&node[4],
&node[5],
thr_pid_buf
);
#endif /* ACE_HAS_TR24731_2005_CRT */
if (nScanned != 12)
{
ACE_DEBUG ((LM_DEBUG,
"ACE_UUID::from_string_i - "
"IllegalArgument (invalid string representation)\n"));
return;
}
}
this->uuid_.time_low_ = static_cast<ACE_UINT32> (time_low);
this->uuid_.time_mid_ = static_cast<ACE_UINT16> (time_mid);
this->uuid_.time_hi_and_version_ = static_cast<ACE_UINT16> (time_hi_and_version);
this->uuid_.clock_seq_hi_and_reserved_ = static_cast<u_char> (clock_seq_hi_and_reserved);
this->uuid_.clock_seq_low_ = static_cast<u_char> (clock_seq_low);
for (size_t i = 0; i < UUID_Node::NODE_ID_SIZE; ++ i)
this->uuid_.node_.node_ID ()[i] = static_cast <u_char> (node[i]);
// Support variant 10- only
if ((this->uuid_.clock_seq_hi_and_reserved_ & 0xc0) != 0x80 &&
(this->uuid_.clock_seq_hi_and_reserved_ & 0xc0) != 0xc0)
{
ACE_DEBUG ((LM_DEBUG,
"ACE_UUID::from_string_i - "
"IllegalArgument (unsupported variant)\n"));
return;
}
/// Support versions 1, 3, and 4 only
ACE_UINT16 V1 = this->uuid_.time_hi_and_version_;
if ((V1 & 0xF000) != 0x1000 &&
(V1 & 0xF000) != 0x3000 &&
(V1 & 0xF000) != 0x4000)
{
ACE_DEBUG ((LM_DEBUG,
"ACE_UUID::from_string_i - "
"IllegalArgument (unsupported version)\n"));
return;
}
if ((this->uuid_.clock_seq_hi_and_reserved_ & 0xc0) == 0xc0)
{
if (uuid_string.length () == NIL_UUID.to_string ()->length ())
{
ACE_DEBUG ((LM_DEBUG,
"ACE_UUID::from_string_i - "
"IllegalArgument (Missing Thread and Process Id)\n"));
return;
}
ACE_CString thr_pid_str (thr_pid_buf);
ssize_t pos = static_cast<ssize_t> (thr_pid_str.find ('-'));
if (pos == -1)
{
ACE_DEBUG ((LM_DEBUG,
"ACE_UUID::from_string_i - "
"IllegalArgument (Thread and Process Id format incorrect)\n"));
return;
}
this->thr_id_ = thr_pid_str.substr (0, pos);
this->pid_ = thr_pid_str.substr (pos+1, thr_pid_str.length ()-pos-1);
}
}
UUID_Generator::UUID_Generator (void)
: time_last_ (0),
destroy_lock_ (true),
is_init_ (false)
{
ACE_NEW (lock_, ACE_SYNCH_MUTEX);
this->init ();
}
UUID_Generator::~UUID_Generator (void)
{
if (destroy_lock_)
delete lock_;
}
void
UUID_Generator::init (void)
{
if (this->is_init_)
return;
ACE_OS::macaddr_node_t macaddress;
int const result = ACE_OS::getmacaddress (&macaddress);
UUID_Node::Node_ID node_id;
if (-1 != result)
{
ACE_OS::memcpy (node_id,
macaddress.node,
UUID_Node::NODE_ID_SIZE);
}
else
{
node_id [0] = static_cast<u_char> (ACE_OS::rand ());
node_id [1] = static_cast<u_char> (ACE_OS::rand ());
node_id [2] = static_cast<u_char> (ACE_OS::rand ());
node_id [3] = static_cast<u_char> (ACE_OS::rand ());
node_id [4] = static_cast<u_char> (ACE_OS::rand ());
node_id [5] = static_cast<u_char> (ACE_OS::rand ());
}
this->get_timestamp (time_last_);
{
ACE_GUARD (ACE_SYNCH_MUTEX, ace_mon, *lock_);
uuid_state_.timestamp = time_last_;
ACE_OS::memcpy (uuid_state_.node.node_ID (),
node_id,
UUID_Node::NODE_ID_SIZE);
}
this->is_init_ = true;
}
void
UUID_Generator::
generate_UUID (UUID& uuid, ACE_UINT16 version, u_char variant)
{
UUID_Time timestamp;
ACE_UINT16 clock_sequence;
this->get_timestamp_and_clocksequence (timestamp,
clock_sequence);
// Construct a Version 1 UUID with the information in the arguments.
uuid.time_low (static_cast<ACE_UINT32> (timestamp & 0xFFFFFFFF));
uuid.time_mid (static_cast<ACE_UINT16> ((timestamp >> 32) & 0xFFFF));
ACE_UINT16 tHAV = static_cast<ACE_UINT16> ((timestamp >> 48) & 0xFFFF);
tHAV |= (version << 12);
uuid.time_hi_and_version (tHAV);
u_char cseqHAV;
uuid.clock_seq_low (static_cast<u_char> (clock_sequence & 0xFF));
cseqHAV = static_cast<u_char> ((clock_sequence & 0x3f00) >> 8);
uuid_state_.timestamp = timestamp;
cseqHAV |= variant;
uuid.clock_seq_hi_and_reserved (cseqHAV);
uuid.node (uuid_state_.node);
if (variant == 0xc0)
{
ACE_Thread_ID thread_id;
char buf [BUFSIZ];
thread_id.to_string (buf);
uuid.thr_id (buf);
ACE_OS::sprintf (buf,
"%d",
static_cast<int> (ACE_OS::getpid ()));
uuid.pid (buf);
}
}
UUID*
UUID_Generator::generate_UUID (ACE_UINT16 version, u_char variant)
{
UUID* uuid = 0;
ACE_NEW_RETURN (uuid,
UUID,
0);
this->generate_UUID (*uuid, version, variant);
return uuid;
}
/// Obtain a new timestamp. If UUID's are being generated too quickly
/// the clock sequence will be incremented
void
UUID_Generator::get_timestamp (UUID_Time& timestamp)
{
ACE_GUARD (ACE_SYNCH_MUTEX, mon, *lock_);
this->get_systemtime (timestamp);
// Account for the clock being set back. Increment the clock
// sequence.
if (timestamp <= time_last_)
{
uuid_state_.clock_sequence = static_cast<ACE_UINT16>
((uuid_state_.clock_sequence + 1) & ACE_UUID_CLOCK_SEQ_MASK);
}
// If the system time ticked since the last UUID was
// generated, set the clock sequence back.
else if (timestamp > time_last_)
{
uuid_state_.clock_sequence = 0;
}
time_last_ = timestamp;
}
void
UUID_Generator::get_timestamp_and_clocksequence (UUID_Time& timestamp,
ACE_UINT16& clock_sequence)
{
ACE_GUARD (ACE_SYNCH_MUTEX, mon, *lock_);
this->get_systemtime (timestamp);
// Account for the clock being set back. Increment the clock
// sequence.
if (timestamp <= time_last_)
uuid_state_.clock_sequence = static_cast<ACE_UINT16> ((uuid_state_.clock_sequence + 1) & ACE_UUID_CLOCK_SEQ_MASK);
// If the system time ticked since the last UUID was
// generated, set the clock sequence back.
else if (timestamp > time_last_)
uuid_state_.clock_sequence = 0;
time_last_ = timestamp;
clock_sequence = uuid_state_.clock_sequence;
}
/**
* ACE_Time_Value is in POSIX time, seconds since Jan 1, 1970. UUIDs use
* time in 100ns ticks since 15 October 1582. The difference is:
* 15 Oct 1582 - 1 Jan 1600: 17 days in Oct, 30 in Nov, 31 in Dec +
* 17 years and 4 leap days (1584, 88, 92 and 96)
* 1 Jan 1600 - 1 Jan 1900: 3 centuries + 73 leap days ( 25 in 17th cent.
* and 24 each in 18th and 19th centuries)
* 1 Jan 1900 - 1 Jan 1970: 70 years + 17 leap days.
* This adds up, in days: (17+30+31+365*17+4)+ (365*300+73)+ (365*70+17) or
* 122192928000000000U (0x1B21DD213814000) 100 ns ticks.
*/
void
UUID_Generator::get_systemtime (UUID_Time & timestamp)
{
const UUID_Time timeOffset =
#if defined (ACE_LACKS_UNSIGNEDLONGLONG_T)
ACE_U_LongLong (ACE_INT64_LITERAL (0x1B21DD213814000));
#elif defined (ACE_LACKS_LONGLONG_T)
ACE_U_LongLong (0x13814000u, 0x1B21DD2u);
#else
ACE_UINT64_LITERAL (0x1B21DD213814000);
#endif /* ACE_LACKS_UNSIGNEDLONGLONG_T */
/// Get the time of day, convert to 100ns ticks then add the offset.
ACE_Time_Value now = ACE_OS::gettimeofday ();
ACE_UINT64 time;
now.to_usec (time);
time = time * 10;
timestamp = time + timeOffset;
}
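// Sanity check of the 15 Oct 1582 offset used above (illustrative
// arithmetic only):
//   days = (17+30+31 + 365*17 + 4) + (365*300 + 73) + (365*70 + 17)
//        = 6287 + 109573 + 25567 = 141427
//   ticks = 141427 * 86400 s/day * 10^7 100ns-ticks/s
//         = 122192928000000000 = 0x1B21DD213814000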
ACE_SYNCH_MUTEX*
UUID_Generator::lock (void)
{
return this->lock_;
}
void
UUID_Generator::lock (ACE_SYNCH_MUTEX* lock, bool release_lock)
{
if (this->destroy_lock_)
delete this->lock_;
this->lock_ = lock;
this->destroy_lock_ = release_lock;
}
}
#if defined (ACE_HAS_EXPLICIT_STATIC_TEMPLATE_MEMBER_INSTANTIATION)
template ACE_Singleton <ACE_Utils::UUID_Generator, ACE_SYNCH_MUTEX> *
ACE_Singleton <ACE_Utils::UUID_Generator, ACE_SYNCH_MUTEX>::singleton_;
#endif /* ACE_HAS_EXPLICIT_STATIC_TEMPLATE_MEMBER_INSTANTIATION */
ACE_END_VERSIONED_NAMESPACE_DECL
| gpl-2.0 |
ack3000/busybox | miscutils/runlevel.c | 11 | 1324 | /* vi: set sw=4 ts=4: */
/*
* Prints out the previous and the current runlevel.
*
* Version: @(#)runlevel 1.20 16-Apr-1997 MvS
*
* This file is part of the sysvinit suite,
* Copyright 1991-1997 Miquel van Smoorenburg.
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*
* initially busyboxified by Bernhard Reutner-Fischer
*/
//usage:#define runlevel_trivial_usage
//usage: "[FILE]"
//usage:#define runlevel_full_usage "\n\n"
//usage: "Find the current and previous system runlevel\n"
//usage: "\n"
//usage: "If no utmp FILE exists or if no runlevel record can be found,\n"
//usage: "print \"unknown\""
//usage:
//usage:#define runlevel_example_usage
//usage: "$ runlevel /var/run/utmp\n"
//usage: "N 2"
#include "libbb.h"
int runlevel_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int runlevel_main(int argc UNUSED_PARAM, char **argv)
{
struct utmp *ut;
char prev;
if (argv[1]) utmpname(argv[1]);
setutent();
while ((ut = getutent()) != NULL) {
if (ut->ut_type == RUN_LVL) {
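/* sysvinit encodes the current runlevel in the low byte of ut_pid and
 * the previous runlevel in the next byte, hence the /256 and %256 below.
 */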
prev = ut->ut_pid / 256;
if (prev == 0) prev = 'N';
printf("%c %c\n", prev, ut->ut_pid % 256);
if (ENABLE_FEATURE_CLEAN_UP)
endutent();
return 0;
}
}
puts("unknown");
if (ENABLE_FEATURE_CLEAN_UP)
endutent();
return 1;
}
| gpl-2.0 |
NoelMacwan/android_kernel_sony_msm8928 | drivers/video/msm/mdss/mdss_mdp_ctl.c | 11 | 51123 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include "mdss_fb.h"
#include "mdss_mdp.h"
/* truncate at 1k */
#define MDSS_MDP_BUS_FACTOR_SHIFT 10
/* 1.5 bus fudge factor */
#define MDSS_MDP_BUS_FUDGE_FACTOR_IB(val) (((val) / 2) * 3)
#define MDSS_MDP_BUS_FUDGE_FACTOR_HIGH_IB(val) (val << 1)
#define MDSS_MDP_BUS_FUDGE_FACTOR_AB(val) (val << 1)
#define MDSS_MDP_BUS_FLOOR_BW (1600000000ULL >> MDSS_MDP_BUS_FACTOR_SHIFT)
/* 1.25 clock fudge factor */
#define MDSS_MDP_CLK_FUDGE_FACTOR(val) (((val) * 5) / 4)
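/*
 * Illustrative values for the fudge-factor macros above:
 * FUDGE_FACTOR_IB(1000) = 1500 (1.5x, rounded down for odd inputs),
 * FUDGE_FACTOR_AB(1000) = 2000 and CLK_FUDGE_FACTOR(1000) = 1250.
 */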
enum {
MDSS_MDP_PERF_UPDATE_SKIP,
MDSS_MDP_PERF_UPDATE_EARLY,
MDSS_MDP_PERF_UPDATE_LATE,
};
#define MDSS_MDP_PERF_UPDATE_CLK BIT(0)
#define MDSS_MDP_PERF_UPDATE_BUS BIT(1)
#define MDSS_MDP_PERF_UPDATE_ALL -1
static DEFINE_MUTEX(mdss_mdp_ctl_lock);
static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);
static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer);
static inline void mdp_mixer_write(struct mdss_mdp_mixer *mixer,
u32 reg, u32 val)
{
writel_relaxed(val, mixer->base + reg);
}
static inline u32 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
{
struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
return (ctl->intf_type == MDSS_INTF_DSI) ?
pinfo->mipi.dsi_pclk_rate :
pinfo->clk_rate;
}
static u32 __mdss_mdp_ctrl_perf_ovrd_helper(struct mdss_mdp_mixer *mixer,
u32 *npipe)
{
struct mdss_panel_info *pinfo;
struct mdss_mdp_pipe *pipe;
u32 mnum, ovrd = 0;
if (!mixer || !mixer->ctl->panel_data)
return 0;
pinfo = &mixer->ctl->panel_data->panel_info;
for (mnum = 0; mnum < MDSS_MDP_MAX_STAGE; mnum++) {
pipe = mixer->stage_pipe[mnum];
if (pipe && pinfo) {
*npipe = *npipe + 1;
if ((pipe->src.w >= pipe->src.h) &&
(pipe->src.w >= pinfo->xres))
ovrd = 1;
}
}
return ovrd;
}
/**
* mdss_mdp_ctrl_perf_ovrd() - Determines if performance override is needed
* @mdata: Struct containing references to all MDP5 hardware structures
* and status info such as interrupts, target caps etc.
* @ab_quota: Arbitrated bandwidth quota
* @ib_quota: Instantaneous bandwidth quota
*
* Function calculates the minimum required MDP and BIMC clocks to avoid MDP
* underflow during portrait video playback. The calculations are based on the
* way MDP fetches (bandwidth requirement) and processes data through
* MDP pipeline (MDP clock requirement) based on frame size and scaling
* requirements.
*/
static void __mdss_mdp_ctrl_perf_ovrd(struct mdss_data_type *mdata,
u64 *ab_quota, u64 *ib_quota)
{
struct mdss_mdp_ctl *ctl;
u32 i, npipe = 0, ovrd = 0;
for (i = 0; i < mdata->nctl; i++) {
ctl = mdata->ctl_off + i;
if (!ctl->power_on)
continue;
ovrd |= __mdss_mdp_ctrl_perf_ovrd_helper(
ctl->mixer_left, &npipe);
ovrd |= __mdss_mdp_ctrl_perf_ovrd_helper(
ctl->mixer_right, &npipe);
}
*ab_quota = MDSS_MDP_BUS_FUDGE_FACTOR_AB(*ab_quota);
if (npipe > 1)
*ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR_HIGH_IB(*ib_quota);
else
*ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR_IB(*ib_quota);
if (ovrd && (*ib_quota < MDSS_MDP_BUS_FLOOR_BW)) {
*ib_quota = MDSS_MDP_BUS_FLOOR_BW;
pr_debug("forcing the BIMC clock to 200 MHz : %llu bytes",
*ib_quota);
} else {
pr_debug("ib quota : %llu bytes", *ib_quota);
}
}
static int mdss_mdp_ctl_perf_commit(struct mdss_data_type *mdata, u32 flags)
{
struct mdss_mdp_ctl *ctl;
int cnum;
unsigned long clk_rate = 0;
u64 bus_ab_quota = 0, bus_ib_quota = 0;
if (!flags) {
pr_err("nothing to update\n");
return -EINVAL;
}
mutex_lock(&mdss_mdp_ctl_lock);
for (cnum = 0; cnum < mdata->nctl; cnum++) {
ctl = mdata->ctl_off + cnum;
if (ctl->power_on) {
bus_ab_quota += ctl->bus_ab_quota;
bus_ib_quota += ctl->bus_ib_quota;
if (ctl->clk_rate > clk_rate)
clk_rate = ctl->clk_rate;
}
}
if (flags & MDSS_MDP_PERF_UPDATE_BUS) {
bus_ab_quota = bus_ib_quota;
__mdss_mdp_ctrl_perf_ovrd(mdata, &bus_ab_quota, &bus_ib_quota);
bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
bus_ab_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
}
if (flags & MDSS_MDP_PERF_UPDATE_CLK) {
clk_rate = MDSS_MDP_CLK_FUDGE_FACTOR(clk_rate);
pr_debug("update clk rate = %lu HZ\n", clk_rate);
mdss_mdp_set_clk_rate(clk_rate);
}
mutex_unlock(&mdss_mdp_ctl_lock);
return 0;
}
/**
* mdss_mdp_perf_calc_pipe() - calculate performance numbers required by pipe
* @pipe: Source pipe struct containing updated pipe params
* @perf: Structure containing values that should be updated for
* performance tuning
*
* Function calculates the minimum required performance calculations in order
* to avoid MDP underflow. The calculations are based on the way MDP
* fetches (bandwidth requirement) and processes data through MDP pipeline
* (MDP clock requirement) based on frame size and scaling requirements.
*/
int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
struct mdss_mdp_perf_params *perf)
{
struct mdss_mdp_mixer *mixer;
int fps = DEFAULT_FRAME_RATE;
u32 quota, rate, v_total, src_h;
if (!pipe || !perf || !pipe->mixer)
return -EINVAL;
mixer = pipe->mixer;
if (mixer->rotator_mode) {
v_total = pipe->flags & MDP_ROT_90 ? pipe->dst.w : pipe->dst.h;
} else if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
struct mdss_panel_info *pinfo;
pinfo = &mixer->ctl->panel_data->panel_info;
fps = mdss_panel_get_framerate(pinfo);
v_total = mdss_panel_get_vtotal(pinfo);
} else {
v_total = mixer->height;
}
/*
* when doing vertical decimation lines will be skipped, hence there is
* no need to account for these lines in MDP clock or request bus
* bandwidth to fetch them.
*/
src_h = pipe->src.h >> pipe->vert_deci;
quota = fps * pipe->src.w * src_h;
if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) {
/*
* with decimation, chroma is not downsampled; this means we
* need to allocate bw for the extra lines that will be fetched
*/
if (pipe->vert_deci)
quota *= 2;
else
quota = (quota * 3) / 2;
} else {
quota *= pipe->src_fmt->bpp;
}
rate = pipe->dst.w;
if (src_h > pipe->dst.h)
rate = (rate * src_h) / pipe->dst.h;
rate *= v_total * fps;
if (mixer->rotator_mode) {
rate /= 4; /* block mode fetch at 4 pix/clk */
quota *= 2; /* bus read + write */
perf->ib_quota = quota;
} else {
perf->ib_quota = (quota / pipe->dst.h) * v_total;
}
perf->ab_quota = quota;
perf->mdp_clk_rate = rate;
pr_debug("mixer=%d pnum=%d clk_rate=%u bus ab=%u ib=%u\n",
mixer->num, pipe->num, rate, perf->ab_quota, perf->ib_quota);
return 0;
}
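/*
 * Worked example for mdss_mdp_perf_calc_pipe() (illustrative numbers,
 * assuming a full-screen 1920x1080 ARGB layer at 60 fps with no scaling
 * or decimation and v_total = 1125):
 *   quota        = 60 * 1920 * 1080 * 4  = 497664000 (~498 MB/s ab)
 *   ib_quota     = (quota / 1080) * 1125 = 518400000 (~518 MB/s)
 *   mdp_clk_rate = 1920 * 1125 * 60      = 129600000 (~130 MHz)
 */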
static void mdss_mdp_perf_mixer_update(struct mdss_mdp_mixer *mixer,
u32 *bus_ab_quota, u32 *bus_ib_quota,
u32 *clk_rate)
{
struct mdss_mdp_pipe *pipe;
struct mdss_panel_info *pinfo = NULL;
int fps = DEFAULT_FRAME_RATE;
u32 v_total;
int i;
u32 max_clk_rate = 0, ab_total = 0, ib_total = 0;
*bus_ab_quota = 0;
*bus_ib_quota = 0;
*clk_rate = 0;
if (!mixer->rotator_mode) {
if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
pinfo = &mixer->ctl->panel_data->panel_info;
fps = mdss_panel_get_framerate(pinfo);
v_total = mdss_panel_get_vtotal(pinfo);
if (pinfo->type == WRITEBACK_PANEL)
pinfo = NULL;
} else {
v_total = mixer->height;
}
*clk_rate = mixer->width * v_total * fps;
if (pinfo && pinfo->lcdc.v_back_porch < MDP_MIN_VBP)
*clk_rate = MDSS_MDP_CLK_FUDGE_FACTOR(*clk_rate);
if (!pinfo) {
/* perf for bus writeback */
*bus_ab_quota = fps * mixer->width * mixer->height * 3;
*bus_ab_quota >>= MDSS_MDP_BUS_FACTOR_SHIFT;
*bus_ib_quota = *bus_ab_quota;
}
}
for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
struct mdss_mdp_perf_params perf;
pipe = mixer->stage_pipe[i];
if (pipe == NULL)
continue;
if (mdss_mdp_perf_calc_pipe(pipe, &perf))
continue;
ab_total += perf.ab_quota >> MDSS_MDP_BUS_FACTOR_SHIFT;
ib_total += perf.ib_quota >> MDSS_MDP_BUS_FACTOR_SHIFT;
if (perf.mdp_clk_rate > max_clk_rate)
max_clk_rate = perf.mdp_clk_rate;
}
*bus_ab_quota += ab_total;
*bus_ib_quota += ib_total;
if (max_clk_rate > *clk_rate)
*clk_rate = max_clk_rate;
pr_debug("final mixer=%d clk_rate=%u bus ab=%u ib=%u\n", mixer->num,
*clk_rate, *bus_ab_quota, *bus_ib_quota);
}
static int mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl)
{
int ret = MDSS_MDP_PERF_UPDATE_SKIP;
u32 clk_rate, ab_quota, ib_quota;
u32 max_clk_rate = 0, total_ab_quota = 0, total_ib_quota = 0;
if (ctl->mixer_left) {
mdss_mdp_perf_mixer_update(ctl->mixer_left, &ab_quota,
&ib_quota, &clk_rate);
total_ab_quota += ab_quota;
total_ib_quota += ib_quota;
max_clk_rate = clk_rate;
}
if (ctl->mixer_right) {
mdss_mdp_perf_mixer_update(ctl->mixer_right, &ab_quota,
&ib_quota, &clk_rate);
total_ab_quota += ab_quota;
total_ib_quota += ib_quota;
if (clk_rate > max_clk_rate)
max_clk_rate = clk_rate;
if (ctl->intf_type) {
clk_rate = mdss_mdp_get_pclk_rate(ctl);
/* minimum clock rate due to inefficiency in 3dmux */
clk_rate = mult_frac(clk_rate >> 1, 9, 8);
if (clk_rate > max_clk_rate)
max_clk_rate = clk_rate;
}
}
/* request minimum bandwidth to have bus clock on when display is on */
if (total_ib_quota == 0)
total_ib_quota = SZ_16M >> MDSS_MDP_BUS_FACTOR_SHIFT;
if (max_clk_rate != ctl->clk_rate) {
if (max_clk_rate > ctl->clk_rate)
ret = MDSS_MDP_PERF_UPDATE_EARLY;
else
ret = MDSS_MDP_PERF_UPDATE_LATE;
ctl->clk_rate = max_clk_rate;
ctl->perf_changed |= MDSS_MDP_PERF_UPDATE_CLK;
}
if ((total_ab_quota != ctl->bus_ab_quota) ||
(total_ib_quota != ctl->bus_ib_quota)) {
if (ret == MDSS_MDP_PERF_UPDATE_SKIP) {
if (total_ib_quota >= ctl->bus_ib_quota)
ret = MDSS_MDP_PERF_UPDATE_EARLY;
else
ret = MDSS_MDP_PERF_UPDATE_LATE;
}
ctl->bus_ab_quota = total_ab_quota;
ctl->bus_ib_quota = total_ib_quota;
ctl->perf_changed |= MDSS_MDP_PERF_UPDATE_BUS;
}
return ret;
}
static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
u32 off)
{
struct mdss_mdp_ctl *ctl = NULL;
u32 cnum;
u32 nctl = mdata->nctl;
mutex_lock(&mdss_mdp_ctl_lock);
if (!mdata->has_wfd_blk)
nctl++;
for (cnum = off; cnum < nctl; cnum++) {
ctl = mdata->ctl_off + cnum;
if (ctl->ref_cnt == 0) {
ctl->ref_cnt++;
ctl->mdata = mdata;
mutex_init(&ctl->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head);
pr_debug("alloc ctl_num=%d\n", ctl->num);
break;
}
ctl = NULL;
}
mutex_unlock(&mdss_mdp_ctl_lock);
return ctl;
}
static int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl)
{
if (!ctl)
return -ENODEV;
pr_debug("free ctl_num=%d ref_cnt=%d\n", ctl->num, ctl->ref_cnt);
if (!ctl->ref_cnt) {
pr_err("called with ref_cnt=0\n");
return -EINVAL;
}
if (ctl->mixer_left) {
mdss_mdp_mixer_free(ctl->mixer_left);
ctl->mixer_left = NULL;
}
if (ctl->mixer_right) {
mdss_mdp_mixer_free(ctl->mixer_right);
ctl->mixer_right = NULL;
}
mutex_lock(&mdss_mdp_ctl_lock);
ctl->ref_cnt--;
ctl->intf_num = MDSS_MDP_NO_INTF;
ctl->intf_type = MDSS_MDP_NO_INTF;
ctl->is_secure = false;
ctl->power_on = false;
ctl->start_fnc = NULL;
ctl->stop_fnc = NULL;
ctl->prepare_fnc = NULL;
ctl->display_fnc = NULL;
ctl->wait_fnc = NULL;
ctl->read_line_cnt_fnc = NULL;
ctl->add_vsync_handler = NULL;
ctl->remove_vsync_handler = NULL;
ctl->panel_data = NULL;
ctl->config_fps_fnc = NULL;
mutex_unlock(&mdss_mdp_ctl_lock);
return 0;
}
static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
struct mdss_mdp_ctl *ctl, u32 type, int mux)
{
struct mdss_mdp_mixer *mixer = NULL, *alt_mixer = NULL;
u32 nmixers_intf;
u32 nmixers_wb;
u32 i;
u32 nmixers;
struct mdss_mdp_mixer *mixer_pool = NULL;
if (!ctl || !ctl->mdata)
return NULL;
mutex_lock(&mdss_mdp_ctl_lock);
nmixers_intf = ctl->mdata->nmixers_intf;
nmixers_wb = ctl->mdata->nmixers_wb;
switch (type) {
case MDSS_MDP_MIXER_TYPE_INTF:
mixer_pool = ctl->mdata->mixer_intf;
nmixers = nmixers_intf;
/*
* try to reserve first layer mixer for write back if
* assertive display needs to be supported through wfd
*/
if (ctl->mdata->has_wb_ad && ctl->intf_num) {
alt_mixer = mixer_pool;
mixer_pool++;
nmixers--;
}
break;
case MDSS_MDP_MIXER_TYPE_WRITEBACK:
mixer_pool = ctl->mdata->mixer_wb;
nmixers = nmixers_wb;
break;
default:
nmixers = 0;
pr_err("invalid pipe type %d\n", type);
break;
}
/* early mdp revisions only support muxing of dual pipes on mixers 0 and 1,
 * so keep those mixers readily available by using mixer 2, if present,
 * when muxing is not required */
if (!mux && (ctl->mdata->mdp_rev == MDSS_MDP_HW_REV_100) &&
(type == MDSS_MDP_MIXER_TYPE_INTF) &&
(nmixers >= MDSS_MDP_INTF_LAYERMIXER2) &&
(mixer_pool[MDSS_MDP_INTF_LAYERMIXER2].ref_cnt == 0))
mixer_pool += MDSS_MDP_INTF_LAYERMIXER2;
/* Allocate virtual wb mixer if no dedicated wfd wb blk is present */
if (!ctl->mdata->has_wfd_blk && (type == MDSS_MDP_MIXER_TYPE_WRITEBACK))
nmixers += 1;
for (i = 0; i < nmixers; i++) {
mixer = mixer_pool + i;
if (mixer->ref_cnt == 0) {
mixer->ref_cnt++;
mixer->params_changed++;
mixer->ctl = ctl;
pr_debug("alloc mixer num %d for ctl=%d\n",
mixer->num, ctl->num);
break;
}
mixer = NULL;
}
if (!mixer && alt_mixer && (alt_mixer->ref_cnt == 0))
mixer = alt_mixer;
mutex_unlock(&mdss_mdp_ctl_lock);
return mixer;
}
static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer)
{
if (!mixer)
return -ENODEV;
pr_debug("free mixer_num=%d ref_cnt=%d\n", mixer->num, mixer->ref_cnt);
if (!mixer->ref_cnt) {
pr_err("called with ref_cnt=0\n");
return -EINVAL;
}
mutex_lock(&mdss_mdp_ctl_lock);
mixer->ref_cnt--;
mutex_unlock(&mdss_mdp_ctl_lock);
return 0;
}
struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator)
{
struct mdss_mdp_ctl *ctl = NULL;
struct mdss_mdp_mixer *mixer = NULL;
ctl = mdss_mdp_ctl_alloc(mdss_res, mdss_res->nmixers_intf);
if (!ctl) {
pr_debug("unable to allocate wb ctl\n");
return NULL;
}
mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK, false);
if (!mixer) {
pr_debug("unable to allocate wb mixer\n");
goto error;
}
mixer->rotator_mode = rotator;
switch (mixer->num) {
case MDSS_MDP_WB_LAYERMIXER0:
ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT0_MODE :
MDSS_MDP_CTL_OP_WB0_MODE);
break;
case MDSS_MDP_WB_LAYERMIXER1:
ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT1_MODE :
MDSS_MDP_CTL_OP_WB1_MODE);
break;
default:
pr_err("invalid layer mixer=%d\n", mixer->num);
goto error;
}
ctl->mixer_left = mixer;
ctl->start_fnc = mdss_mdp_writeback_start;
ctl->power_on = true;
ctl->wb_type = (rotator ? MDSS_MDP_WB_CTL_TYPE_BLOCK :
MDSS_MDP_WB_CTL_TYPE_LINE);
mixer->ctl = ctl;
if (ctl->start_fnc)
ctl->start_fnc(ctl);
return mixer;
error:
if (mixer)
mdss_mdp_mixer_free(mixer);
if (ctl)
mdss_mdp_ctl_free(ctl);
return NULL;
}
int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer)
{
struct mdss_mdp_ctl *ctl;
if (!mixer || !mixer->ctl) {
pr_err("invalid ctl handle\n");
return -ENODEV;
}
ctl = mixer->ctl;
mixer->rotator_mode = 0;
pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num);
if (ctl->stop_fnc)
ctl->stop_fnc(ctl);
mdss_mdp_ctl_free(ctl);
mdss_mdp_ctl_perf_commit(ctl->mdata, MDSS_MDP_PERF_UPDATE_ALL);
return 0;
}
int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
{
switch (ctl->panel_data->panel_info.type) {
case MIPI_VIDEO_PANEL:
return mdss_mdp_video_reconfigure_splash_done(ctl, handoff);
case MIPI_CMD_PANEL:
return mdss_mdp_cmd_reconfigure_splash_done(ctl, handoff);
default:
return 0;
}
}
static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_ctl *split_ctl)
{
if (!ctl || !split_ctl)
return -ENODEV;
/* setup split ctl mixer as right mixer of original ctl so that
* original ctl can work the same way as dual pipe solution */
ctl->mixer_right = split_ctl->mixer_left;
return 0;
}
static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
struct mdss_mdp_ctl *ctl)
{
if (ctl && ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
return ctl->mixer_right->ctl;
return NULL;
}
static int mdss_mdp_ctl_fbc_enable(int enable,
struct mdss_mdp_mixer *mixer, struct mdss_panel_info *pdata)
{
struct fbc_panel_info *fbc;
u32 mode = 0, budget_ctl = 0, lossy_mode = 0;
if (!pdata) {
pr_err("Invalid pdata\n");
return -EINVAL;
}
fbc = &pdata->fbc;
if (!fbc || !fbc->enabled) {
pr_err("Invalid FBC structure\n");
return -EINVAL;
}
if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0)
pr_debug("Mixer supports FBC.\n");
else {
pr_debug("Mixer doesn't support FBC.\n");
return -EINVAL;
}
if (enable) {
mode = ((pdata->xres) << 16) | ((fbc->comp_mode) << 8) |
((fbc->qerr_enable) << 7) | ((fbc->cd_bias) << 4) |
((fbc->pat_enable) << 3) | ((fbc->vlc_enable) << 2) |
((fbc->bflc_enable) << 1) | enable;
budget_ctl = ((fbc->line_x_budget) << 12) |
((fbc->block_x_budget) << 8) | fbc->block_budget;
lossy_mode = ((fbc->lossless_mode_thd) << 16) |
((fbc->lossy_mode_thd) << 8) |
((fbc->lossy_rgb_thd) << 3) | fbc->lossy_mode_idx;
}
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_MODE, mode);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_BUDGET_CTL,
budget_ctl);
mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_LOSSY_MODE,
lossy_mode);
return 0;
}
int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_ctl *split_ctl;
u32 width, height;
int split_fb;
if (!ctl || !ctl->panel_data) {
pr_err("invalid ctl handle\n");
return -ENODEV;
}
split_ctl = mdss_mdp_get_split_ctl(ctl);
width = ctl->panel_data->panel_info.xres;
height = ctl->panel_data->panel_info.yres;
split_fb = (ctl->mfd->split_fb_left &&
ctl->mfd->split_fb_right &&
(ctl->mfd->split_fb_left <= MAX_MIXER_WIDTH) &&
(ctl->mfd->split_fb_right <= MAX_MIXER_WIDTH)) ? 1 : 0;
pr_debug("max=%d xres=%d left=%d right=%d\n", MAX_MIXER_WIDTH,
width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right);
if ((split_ctl && (width > MAX_MIXER_WIDTH)) ||
(width > (2 * MAX_MIXER_WIDTH))) {
pr_err("Unsupported panel resolution: %dx%d\n", width, height);
return -ENOTSUPP;
}
ctl->width = width;
ctl->height = height;
ctl->roi = (struct mdss_mdp_img_rect) {0, 0, width, height};
if (!ctl->mixer_left) {
ctl->mixer_left =
mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
((width > MAX_MIXER_WIDTH) || split_fb));
if (!ctl->mixer_left) {
pr_err("unable to allocate layer mixer\n");
return -ENOMEM;
}
}
if (split_fb)
width = ctl->mfd->split_fb_left;
else if (width > MAX_MIXER_WIDTH)
width /= 2;
ctl->mixer_left->width = width;
ctl->mixer_left->height = height;
ctl->mixer_left->roi = (struct mdss_mdp_img_rect) {0, 0, width, height};
if (split_ctl) {
pr_debug("split display detected\n");
return 0;
}
if (split_fb)
width = ctl->mfd->split_fb_right;
if (width < ctl->width) {
if (ctl->mixer_right == NULL) {
ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
MDSS_MDP_MIXER_TYPE_INTF, true);
if (!ctl->mixer_right) {
pr_err("unable to allocate right mixer\n");
if (ctl->mixer_left)
mdss_mdp_mixer_free(ctl->mixer_left);
return -ENOMEM;
}
}
ctl->mixer_right->width = width;
ctl->mixer_right->height = height;
ctl->mixer_right->roi = (struct mdss_mdp_img_rect)
{0, 0, width, height};
} else if (ctl->mixer_right) {
mdss_mdp_mixer_free(ctl->mixer_right);
ctl->mixer_right = NULL;
}
if (ctl->mixer_right) {
ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
} else {
ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
}
return 0;
}
static int mdss_mdp_ctl_setup_wfd(struct mdss_mdp_ctl *ctl)
{
struct mdss_data_type *mdata = ctl->mdata;
struct mdss_mdp_mixer *mixer;
int mixer_type;
/* if WB2 is supported, try to allocate it first */
if (mdata->nmixers_intf >= MDSS_MDP_INTF_LAYERMIXER2)
mixer_type = MDSS_MDP_MIXER_TYPE_INTF;
else
mixer_type = MDSS_MDP_MIXER_TYPE_WRITEBACK;
mixer = mdss_mdp_mixer_alloc(ctl, mixer_type, false);
if (!mixer && mixer_type == MDSS_MDP_MIXER_TYPE_INTF)
mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK,
false);
if (!mixer) {
pr_err("Unable to allocate writeback mixer\n");
return -ENOMEM;
}
if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
} else {
switch (mixer->num) {
case MDSS_MDP_WB_LAYERMIXER0:
ctl->opmode = MDSS_MDP_CTL_OP_WB0_MODE;
break;
case MDSS_MDP_WB_LAYERMIXER1:
ctl->opmode = MDSS_MDP_CTL_OP_WB1_MODE;
break;
default:
pr_err("Incorrect writeback config num=%d\n",
mixer->num);
mdss_mdp_mixer_free(mixer);
return -EINVAL;
}
ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_LINE;
}
ctl->mixer_left = mixer;
return 0;
}
struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
struct msm_fb_data_type *mfd)
{
struct mdss_mdp_ctl *ctl;
int ret = 0;
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
ctl = mdss_mdp_ctl_alloc(mdata, MDSS_MDP_CTL0);
if (!ctl) {
pr_err("unable to allocate ctl\n");
return ERR_PTR(-ENOMEM);
}
ctl->mfd = mfd;
ctl->panel_data = pdata;
ctl->is_video_mode = false;
switch (pdata->panel_info.type) {
case EDP_PANEL:
ctl->is_video_mode = true;
ctl->intf_num = MDSS_MDP_INTF0;
ctl->intf_type = MDSS_INTF_EDP;
ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
ctl->start_fnc = mdss_mdp_video_start;
break;
case MIPI_VIDEO_PANEL:
ctl->is_video_mode = true;
if (pdata->panel_info.pdest == DISPLAY_1)
ctl->intf_num = MDSS_MDP_INTF1;
else
ctl->intf_num = MDSS_MDP_INTF2;
ctl->intf_type = MDSS_INTF_DSI;
ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
ctl->start_fnc = mdss_mdp_video_start;
break;
case MIPI_CMD_PANEL:
if (pdata->panel_info.pdest == DISPLAY_1)
ctl->intf_num = MDSS_MDP_INTF1;
else
ctl->intf_num = MDSS_MDP_INTF2;
ctl->intf_type = MDSS_INTF_DSI;
ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
ctl->start_fnc = mdss_mdp_cmd_start;
break;
case DTV_PANEL:
ctl->is_video_mode = true;
ctl->intf_num = MDSS_MDP_INTF3;
ctl->intf_type = MDSS_INTF_HDMI;
ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
ctl->start_fnc = mdss_mdp_video_start;
ret = mdss_mdp_limited_lut_igc_config(ctl);
if (ret)
pr_err("Unable to config IGC LUT data");
break;
case WRITEBACK_PANEL:
ctl->intf_num = MDSS_MDP_NO_INTF;
ctl->start_fnc = mdss_mdp_writeback_start;
ret = mdss_mdp_ctl_setup_wfd(ctl);
if (ret)
goto ctl_init_fail;
break;
default:
pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
ret = -EINVAL;
goto ctl_init_fail;
}
ctl->opmode |= (ctl->intf_num << 4);
if (ctl->intf_num == MDSS_MDP_NO_INTF) {
ctl->dst_format = pdata->panel_info.out_format;
} else {
struct mdp_dither_cfg_data dither = {
.block = mfd->index + MDP_LOGICAL_BLOCK_DISP_0,
.flags = MDP_PP_OPS_DISABLE,
};
switch (pdata->panel_info.bpp) {
case 18:
ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666;
dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
dither.g_y_depth = 2;
dither.r_cr_depth = 2;
dither.b_cb_depth = 2;
break;
case 24:
default:
ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB888;
break;
}
mdss_mdp_dither_config(&dither, NULL);
}
return ctl;
ctl_init_fail:
mdss_mdp_ctl_free(ctl);
return ERR_PTR(ret);
}
int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
struct mdss_panel_data *pdata)
{
struct mdss_mdp_ctl *sctl;
struct mdss_mdp_mixer *mixer;
if (!ctl || !pdata)
return -ENODEV;
if (pdata->panel_info.xres > MAX_MIXER_WIDTH) {
pr_err("Unsupported second panel resolution: %dx%d\n",
pdata->panel_info.xres, pdata->panel_info.yres);
return -ENOTSUPP;
}
if (ctl->mixer_right) {
pr_err("right mixer already setup for ctl=%d\n", ctl->num);
return -EPERM;
}
sctl = mdss_mdp_ctl_init(pdata, ctl->mfd);
if (!sctl) {
pr_err("unable to setup split display\n");
return -ENODEV;
}
sctl->width = pdata->panel_info.xres;
sctl->height = pdata->panel_info.yres;
ctl->mixer_left = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
false);
if (!ctl->mixer_left) {
pr_err("unable to allocate layer mixer\n");
mdss_mdp_ctl_destroy(sctl);
return -ENOMEM;
}
mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false);
if (!mixer) {
pr_err("unable to allocate layer mixer\n");
mdss_mdp_ctl_destroy(sctl);
return -ENOMEM;
}
mixer->width = sctl->width;
mixer->height = sctl->height;
mixer->roi = (struct mdss_mdp_img_rect)
{0, 0, mixer->width, mixer->height};
sctl->mixer_left = mixer;
return mdss_mdp_set_split_ctl(ctl, sctl);
}
static void mdss_mdp_ctl_split_display_enable(int enable,
struct mdss_mdp_ctl *main_ctl, struct mdss_mdp_ctl *slave_ctl)
{
u32 upper = 0, lower = 0;
pr_debug("split main ctl=%d intf=%d slave ctl=%d intf=%d\n",
main_ctl->num, main_ctl->intf_num,
slave_ctl->num, slave_ctl->intf_num);
if (enable) {
if (main_ctl->opmode & MDSS_MDP_CTL_OP_CMD_MODE) {
upper |= BIT(1);
lower |= BIT(1);
/* interface controlling sw trigger */
if (main_ctl->intf_num == MDSS_MDP_INTF2)
upper |= BIT(4);
else
upper |= BIT(8);
} else { /* video mode */
if (main_ctl->intf_num == MDSS_MDP_INTF2)
lower |= BIT(4);
else
lower |= BIT(8);
}
}
MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper);
MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower);
MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_EN, enable);
}
int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_ctl *sctl;
int rc;
rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL);
WARN(rc, "unable to close panel for intf=%d\n", ctl->intf_num);
sctl = mdss_mdp_get_split_ctl(ctl);
if (sctl) {
pr_debug("destroying split display ctl=%d\n", sctl->num);
if (sctl->mixer_left)
mdss_mdp_mixer_free(sctl->mixer_left);
mdss_mdp_ctl_free(sctl);
} else if (ctl->mixer_right) {
mdss_mdp_mixer_free(ctl->mixer_right);
ctl->mixer_right = NULL;
}
if (ctl->mixer_left) {
mdss_mdp_mixer_free(ctl->mixer_left);
ctl->mixer_left = NULL;
}
mdss_mdp_ctl_free(ctl);
return 0;
}
int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
{
struct mdss_panel_data *pdata;
int rc = 0;
if (!ctl || !ctl->panel_data)
return -ENODEV;
pdata = ctl->panel_data;
pr_debug("sending ctl=%d event=%d\n", ctl->num, event);
do {
if (pdata->event_handler)
rc = pdata->event_handler(pdata, event, arg);
pdata = pdata->next;
} while (rc == 0 && pdata);
return rc;
}
static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_mixer *mixer;
u32 outsize, temp;
int ret = 0;
int i, nmixers;
if (ctl->start_fnc)
ret = ctl->start_fnc(ctl);
else
pr_warn("no start function for ctl=%d type=%d\n", ctl->num,
ctl->panel_data->panel_info.type);
if (ret) {
pr_err("unable to start intf\n");
return ret;
}
pr_debug("ctl_num=%d\n", ctl->num);
if (!ctl->panel_data->panel_info.cont_splash_enabled) {
nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER +
MDSS_MDP_WB_MAX_LAYERMIXER;
for (i = 0; i < nmixers; i++)
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(i), 0);
}
mixer = ctl->mixer_left;
mdss_mdp_pp_resume(ctl, mixer->num);
mixer->params_changed++;
temp = MDSS_MDP_REG_READ(MDSS_MDP_REG_DISP_INTF_SEL);
temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
MDSS_MDP_REG_WRITE(MDSS_MDP_REG_DISP_INTF_SEL, temp);
outsize = (mixer->height << 16) | mixer->width;
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
if (ctl->panel_data->panel_info.fbc.enabled) {
ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
&ctl->panel_data->panel_info);
}
return ret;
}
int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_ctl *sctl;
int ret = 0;
if (ctl->power_on) {
pr_debug("%d: panel already on!\n", __LINE__);
return 0;
}
ret = mdss_mdp_ctl_setup(ctl);
if (ret)
return ret;
sctl = mdss_mdp_get_split_ctl(ctl);
mutex_lock(&ctl->lock);
ctl->power_on = true;
ctl->bus_ab_quota = 0;
ctl->bus_ib_quota = 0;
ctl->clk_rate = 0;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL);
if (ret) {
pr_err("panel power on failed ctl=%d\n", ctl->num);
goto error;
}
ret = mdss_mdp_ctl_start_sub(ctl);
if (ret == 0) {
if (sctl) { /* split display is available */
ret = mdss_mdp_ctl_start_sub(sctl);
if (!ret)
mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
} else if (ctl->mixer_right) {
struct mdss_mdp_mixer *mixer = ctl->mixer_right;
u32 out, off;
mdss_mdp_pp_resume(ctl, mixer->num);
mixer->params_changed++;
out = (mixer->height << 16) | mixer->width;
off = MDSS_MDP_REG_LM_OFFSET(mixer->num);
MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, out);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
}
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
error:
mutex_unlock(&ctl->lock);
return ret;
}
int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl)
{
struct mdss_mdp_ctl *sctl;
int ret = 0;
u32 off;
if (!ctl->power_on) {
pr_debug("%s %d already off!\n", __func__, __LINE__);
return 0;
}
sctl = mdss_mdp_get_split_ctl(ctl);
pr_debug("ctl_num=%d\n", ctl->num);
mutex_lock(&ctl->lock);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
if (ctl->stop_fnc)
ret = ctl->stop_fnc(ctl);
else
pr_warn("no stop func for ctl=%d\n", ctl->num);
if (sctl && sctl->stop_fnc) {
ret = sctl->stop_fnc(sctl);
mdss_mdp_ctl_split_display_enable(0, ctl, sctl);
}
if (ret) {
pr_warn("error powering off intf ctl=%d\n", ctl->num);
} else {
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0);
if (sctl)
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0);
if (ctl->mixer_left) {
off = __mdss_mdp_ctl_get_mixer_off(ctl->mixer_left);
mdss_mdp_ctl_write(ctl, off, 0);
}
if (ctl->mixer_right) {
off = __mdss_mdp_ctl_get_mixer_off(ctl->mixer_right);
mdss_mdp_ctl_write(ctl, off, 0);
}
ctl->power_on = false;
ctl->play_cnt = 0;
ctl->clk_rate = 0;
mdss_mdp_ctl_perf_commit(ctl->mdata, MDSS_MDP_PERF_UPDATE_ALL);
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&ctl->lock);
return ret;
}
void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
struct mdp_display_commit *data)
{
struct mdss_mdp_img_rect temp_roi, mixer_roi;
temp_roi.x = data->roi.x;
temp_roi.y = data->roi.y;
temp_roi.w = data->roi.w;
temp_roi.h = data->roi.h;
/*
* No Partial Update for:
* 1) dual DSI panels
* 2) non-cmd mode panels
*/
if (!temp_roi.w || !temp_roi.h || ctl->mixer_right ||
(ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) ||
!ctl->panel_data->panel_info.partial_update_enabled) {
temp_roi = (struct mdss_mdp_img_rect)
{0, 0, ctl->mixer_left->width,
ctl->mixer_left->height};
}
ctl->roi_changed = 0;
if (((temp_roi.x != ctl->roi.x) ||
(temp_roi.y != ctl->roi.y)) ||
((temp_roi.w != ctl->roi.w) ||
(temp_roi.h != ctl->roi.h))) {
ctl->roi = temp_roi;
ctl->roi_changed++;
mixer_roi = ctl->mixer_left->roi;
if ((mixer_roi.w != temp_roi.w) ||
(mixer_roi.h != temp_roi.h)) {
ctl->mixer_left->roi = temp_roi;
ctl->mixer_left->params_changed++;
}
}
pr_debug("ROI requested: [%d, %d, %d, %d]\n",
ctl->roi.x, ctl->roi.y, ctl->roi.w, ctl->roi.h);
}
/*
* mdss_mdp_ctl_reset() - reset mdp ctl path.
* @ctl: mdp controller.
* This function is called when an underflow happens; it will
* reset the mdp ctl path and poll for its completion.
*
* Note: called within atomic context.
*/
int mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl)
{
u32 status = 1;
int cnt = 20;
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_SW_RESET, 1);
/*
* it takes around 30us for mdp to finish resetting its ctl path;
* poll every 50us so that the reset should be completed at the 1st poll
*/
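/* Worst case the loop below waits about 20 * 50us = 1ms before giving up. */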
do {
udelay(50);
status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET);
status &= 0x01;
pr_debug("status=%x\n", status);
cnt--;
if (cnt == 0) {
pr_err("timeout\n");
return -EAGAIN;
}
} while (status);
return 0;
}
static int mdss_mdp_mixer_setup(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_mixer *mixer)
{
struct mdss_mdp_pipe *pipe;
u32 off, blend_op, blend_stage;
u32 mixercfg = 0, blend_color_out = 0, bg_alpha_enable = 0;
u32 fg_alpha = 0, bg_alpha = 0;
int stage, secure = 0;
int screen_state;
int outsize = 0;
screen_state = ctl->force_screen_state;
if (!mixer)
return -ENODEV;
pr_debug("setup mixer=%d\n", mixer->num);
outsize = (mixer->roi.h << 16) | mixer->roi.w;
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
if (screen_state == MDSS_SCREEN_FORCE_BLANK) {
mixercfg = MDSS_MDP_LM_BORDER_COLOR;
goto update_mixer;
}
pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE];
if (pipe == NULL) {
mixercfg = MDSS_MDP_LM_BORDER_COLOR;
} else {
if (pipe->num == MDSS_MDP_SSPP_VIG3 ||
pipe->num == MDSS_MDP_SSPP_RGB3) {
/* Add 2 to account for Cursor & Border bits */
mixercfg = 1 << ((3 * pipe->num)+2);
} else {
mixercfg = 1 << (3 * pipe->num);
}
if (pipe->src_fmt->alpha_enable)
bg_alpha_enable = 1;
secure = pipe->flags & MDP_SECURE_OVERLAY_SESSION;
}
for (stage = MDSS_MDP_STAGE_0; stage < MDSS_MDP_MAX_STAGE; stage++) {
pipe = mixer->stage_pipe[stage];
if (pipe == NULL)
continue;
if (stage != pipe->mixer_stage) {
mixer->stage_pipe[stage] = NULL;
continue;
}
blend_stage = stage - MDSS_MDP_STAGE_0;
off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);
blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
fg_alpha = pipe->alpha;
bg_alpha = 0xFF - pipe->alpha;
/* keep fg alpha */
blend_color_out |= 1 << (blend_stage + 1);
switch (pipe->blend_op) {
case BLEND_OP_OPAQUE:
blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
pr_debug("pnum=%d stg=%d op=OPAQUE\n", pipe->num,
stage);
break;
case BLEND_OP_PREMULTIPLIED:
if (pipe->src_fmt->alpha_enable) {
blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |=
MDSS_MDP_BLEND_BG_MOD_ALPHA |
MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
} else {
blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
}
}
pr_debug("pnum=%d stg=%d op=PREMULTIPLIED\n", pipe->num,
stage);
break;
case BLEND_OP_COVERAGE:
if (pipe->src_fmt->alpha_enable) {
blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL |
MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
if (fg_alpha != 0xff) {
bg_alpha = fg_alpha;
blend_op |=
MDSS_MDP_BLEND_FG_MOD_ALPHA |
MDSS_MDP_BLEND_FG_INV_MOD_ALPHA |
MDSS_MDP_BLEND_BG_MOD_ALPHA |
MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
} else {
blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
}
}
pr_debug("pnum=%d stg=%d op=COVERAGE\n", pipe->num,
stage);
break;
default:
blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
pr_debug("pnum=%d stg=%d op=NONE\n", pipe->num,
stage);
break;
}
if (!pipe->src_fmt->alpha_enable && bg_alpha_enable)
blend_color_out = 0;
mixercfg |= stage << (3 * pipe->num);
pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage,
blend_op, fg_alpha, bg_alpha);
mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA,
fg_alpha);
mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA,
bg_alpha);
}
if (mixer->cursor_enabled)
mixercfg |= MDSS_MDP_LM_CURSOR_OUT;
update_mixer:
pr_debug("mixer=%d mixer_cfg=%x\n", mixer->num, mixercfg);
if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
ctl->flush_bits |= BIT(20);
else
ctl->flush_bits |= BIT(6) << mixer->num;
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OP_MODE, blend_color_out);
off = __mdss_mdp_ctl_get_mixer_off(mixer);
mdss_mdp_ctl_write(ctl, off, mixercfg);
return 0;
}
int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata,
u32 *mixer_offsets, u32 *dspp_offsets, u32 *pingpong_offsets,
u32 type, u32 len)
{
struct mdss_mdp_mixer *head;
u32 i;
int rc = 0;
u32 size = len;
if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && !mdata->has_wfd_blk)
size++;
head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_mixer) *
size, GFP_KERNEL);
if (!head) {
pr_err("unable to setup mixer type=%d :kzalloc fail\n",
type);
return -ENOMEM;
}
for (i = 0; i < len; i++) {
head[i].type = type;
head[i].base = mdata->mdp_base + mixer_offsets[i];
head[i].ref_cnt = 0;
head[i].num = i;
if (type == MDSS_MDP_MIXER_TYPE_INTF) {
head[i].dspp_base = mdata->mdp_base + dspp_offsets[i];
head[i].pingpong_base = mdata->mdp_base +
pingpong_offsets[i];
}
}
/*
* Duplicate the last writeback mixer for concurrent line and block mode
* operations
*/
if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && !mdata->has_wfd_blk)
head[len] = head[len - 1];
switch (type) {
case MDSS_MDP_MIXER_TYPE_INTF:
mdata->mixer_intf = head;
break;
case MDSS_MDP_MIXER_TYPE_WRITEBACK:
mdata->mixer_wb = head;
break;
default:
pr_err("Invalid mixer type=%d\n", type);
rc = -EINVAL;
break;
}
return rc;
}
int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata,
u32 *ctl_offsets, u32 *wb_offsets, u32 len)
{
struct mdss_mdp_ctl *head;
struct mutex *shared_lock = NULL;
u32 i;
u32 size = len;
if (!mdata->has_wfd_blk) {
size++;
shared_lock = devm_kzalloc(&mdata->pdev->dev,
sizeof(struct mutex),
GFP_KERNEL);
if (!shared_lock) {
pr_err("unable to allocate mem for mutex\n");
return -ENOMEM;
}
mutex_init(shared_lock);
}
head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_ctl) *
size, GFP_KERNEL);
if (!head) {
pr_err("unable to setup ctl and wb: kzalloc fail\n");
return -ENOMEM;
}
for (i = 0; i < len; i++) {
head[i].num = i;
head[i].base = (mdata->mdp_base) + ctl_offsets[i];
head[i].wb_base = (mdata->mdp_base) + wb_offsets[i];
head[i].ref_cnt = 0;
}
if (!mdata->has_wfd_blk) {
head[len - 1].shared_lock = shared_lock;
/*
* Allocate a virtual ctl to be able to perform simultaneous
* line mode and block mode operations on the same
* writeback block
*/
head[len] = head[len - 1];
head[len].num = head[len - 1].num;
}
mdata->ctl_off = head;
return 0;
}
struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
{
struct mdss_mdp_mixer *mixer = NULL;
struct mdss_overlay_private *mdp5_data = NULL;
if (!ctl || !ctl->mfd) {
pr_err("ctl not initialized\n");
return NULL;
}
mdp5_data = mfd_to_mdp5_data(ctl->mfd);
if (!mdp5_data) {
pr_err("ctl not initialized\n");
return NULL;
}
switch (mux) {
case MDSS_MDP_MIXER_MUX_DEFAULT:
case MDSS_MDP_MIXER_MUX_LEFT:
mixer = mdp5_data->mixer_swap ?
ctl->mixer_right : ctl->mixer_left;
break;
case MDSS_MDP_MIXER_MUX_RIGHT:
mixer = mdp5_data->mixer_swap ?
ctl->mixer_left : ctl->mixer_right;
break;
}
return mixer;
}
struct mdss_mdp_pipe *mdss_mdp_mixer_stage_pipe(struct mdss_mdp_ctl *ctl,
int mux, int stage)
{
struct mdss_mdp_pipe *pipe = NULL;
struct mdss_mdp_mixer *mixer;
if (!ctl)
return NULL;
if (mutex_lock_interruptible(&ctl->lock))
return NULL;
mixer = mdss_mdp_mixer_get(ctl, mux);
if (mixer)
pipe = mixer->stage_pipe[stage];
mutex_unlock(&ctl->lock);
return pipe;
}
int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe, int params_changed)
{
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
int i;
if (!pipe)
return -EINVAL;
mixer = pipe->mixer;
if (!mixer)
return -EINVAL;
ctl = mixer->ctl;
if (!ctl)
return -EINVAL;
if (pipe->mixer_stage >= MDSS_MDP_MAX_STAGE) {
pr_err("invalid mixer stage\n");
return -EINVAL;
}
pr_debug("pnum=%x mixer=%d stage=%d\n", pipe->num, mixer->num,
pipe->mixer_stage);
if (mutex_lock_interruptible(&ctl->lock))
return -EINTR;
if (params_changed) {
mixer->params_changed++;
for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
if (i == pipe->mixer_stage)
mixer->stage_pipe[i] = pipe;
else if (mixer->stage_pipe[i] == pipe)
mixer->stage_pipe[i] = NULL;
}
}
if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA)
ctl->flush_bits |= BIT(pipe->num) << 5;
else if (pipe->num == MDSS_MDP_SSPP_VIG3 ||
pipe->num == MDSS_MDP_SSPP_RGB3)
ctl->flush_bits |= BIT(pipe->num) << 10;
else /* RGB/VIG 0-2 pipes */
ctl->flush_bits |= BIT(pipe->num);
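	/*
	 * Illustrative mapping (added note; the pipe enum values are defined
	 * elsewhere in the driver): a DMA pipe flushes via BIT(pipe->num + 5),
	 * the VIG3/RGB3 pipes via BIT(pipe->num + 10), and the remaining
	 * RGB/VIG pipes via BIT(pipe->num) directly.
	 */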
mutex_unlock(&ctl->lock);
return 0;
}
int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe)
{
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
if (!pipe)
return -EINVAL;
mixer = pipe->mixer;
if (!mixer)
return -EINVAL;
ctl = mixer->ctl;
if (!ctl)
return -EINVAL;
pr_debug("unstage pnum=%d stage=%d mixer=%d\n", pipe->num,
pipe->mixer_stage, mixer->num);
if (mutex_lock_interruptible(&ctl->lock))
return -EINTR;
if (pipe == mixer->stage_pipe[pipe->mixer_stage]) {
mixer->params_changed++;
mixer->stage_pipe[pipe->mixer_stage] = NULL;
}
mutex_unlock(&ctl->lock);
return 0;
}
static int mdss_mdp_mixer_update(struct mdss_mdp_mixer *mixer)
{
u32 off = 0;
if (!mixer)
return -EINVAL;
mixer->params_changed = 0;
/* skip mixer setup for rotator */
if (!mixer->rotator_mode) {
mdss_mdp_mixer_setup(mixer->ctl, mixer);
} else {
off = __mdss_mdp_ctl_get_mixer_off(mixer);
mdss_mdp_ctl_write(mixer->ctl, off, 0);
}
return 0;
}
int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl, int fps)
{
int ret = 0;
if (ctl->config_fps_fnc)
ret = ctl->config_fps_fnc(ctl, fps);
return ret;
}
int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
ktime_t *wakeup_time)
{
struct mdss_panel_info *pinfo;
u32 clk_rate, clk_period;
u32 current_line, total_line;
u32 time_of_line, time_to_vsync;
ktime_t current_time = ktime_get();
if (!ctl->read_line_cnt_fnc)
return -ENOSYS;
pinfo = &ctl->panel_data->panel_info;
if (!pinfo)
return -ENODEV;
clk_rate = mdss_mdp_get_pclk_rate(ctl);
clk_rate /= 1000; /* in kHz */
if (!clk_rate)
return -EINVAL;
/*
	 * calculate clk_period in picoseconds to maintain good
	 * accuracy with high pclk rates; the resulting value fits in
	 * a 17-bit range.
*/
clk_period = 1000000000 / clk_rate;
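	/*
	 * Worked example (illustrative): a 148500 kHz pixel clock gives
	 * clk_period = 1000000000 / 148500 ~= 6734 ps (~6.7 ns per pixel),
	 * well within the 17-bit range mentioned above.
	 */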
if (!clk_period)
return -EINVAL;
time_of_line = (pinfo->lcdc.h_back_porch +
pinfo->lcdc.h_front_porch +
pinfo->lcdc.h_pulse_width +
pinfo->xres) * clk_period;
time_of_line /= 1000; /* in nano second */
if (!time_of_line)
return -EINVAL;
current_line = ctl->read_line_cnt_fnc(ctl);
total_line = pinfo->lcdc.v_back_porch +
pinfo->lcdc.v_front_porch +
pinfo->lcdc.v_pulse_width +
pinfo->yres;
if (current_line > total_line)
return -EINVAL;
time_to_vsync = time_of_line * (total_line - current_line);
if (!time_to_vsync)
return -EINVAL;
*wakeup_time = ktime_add_ns(current_time, time_to_vsync);
pr_debug("clk_rate=%dkHz clk_period=%d cur_line=%d tot_line=%d\n",
clk_rate, clk_period, current_line, total_line);
pr_debug("time_to_vsync=%d current_time=%d wakeup_time=%d\n",
time_to_vsync, (int)ktime_to_ms(current_time),
(int)ktime_to_ms(*wakeup_time));
return 0;
}
int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl)
{
int ret;
if (!ctl) {
pr_err("invalid ctl\n");
return -ENODEV;
}
ret = mutex_lock_interruptible(&ctl->lock);
if (ret)
return ret;
if (!ctl->power_on) {
mutex_unlock(&ctl->lock);
return 0;
}
if (ctl->wait_fnc)
ret = ctl->wait_fnc(ctl, NULL);
if (ctl->perf_changed) {
mdss_mdp_ctl_perf_commit(ctl->mdata, ctl->perf_changed);
ctl->perf_changed = 0;
}
mutex_unlock(&ctl->lock);
return ret;
}
int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl)
{
int ret;
ret = mutex_lock_interruptible(&ctl->lock);
if (ret)
return ret;
if (!ctl->power_on) {
mutex_unlock(&ctl->lock);
return 0;
}
if (ctl->wait_pingpong)
ret = ctl->wait_pingpong(ctl, NULL);
mutex_unlock(&ctl->lock);
return ret;
}
int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg)
{
struct mdss_mdp_ctl *sctl = NULL;
int mixer1_changed, mixer2_changed;
int ret = 0;
int perf_update = MDSS_MDP_PERF_UPDATE_SKIP;
if (!ctl) {
pr_err("display function not set\n");
return -ENODEV;
}
mutex_lock(&ctl->lock);
pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
if (!ctl->power_on) {
mutex_unlock(&ctl->lock);
return 0;
}
sctl = mdss_mdp_get_split_ctl(ctl);
mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed);
mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
if (mixer1_changed || mixer2_changed
|| ctl->force_screen_state) {
perf_update = mdss_mdp_ctl_perf_update(ctl);
if (ctl->prepare_fnc)
ret = ctl->prepare_fnc(ctl, arg);
if (ret) {
pr_err("error preparing display\n");
goto done;
}
if (perf_update == MDSS_MDP_PERF_UPDATE_EARLY) {
mdss_mdp_ctl_perf_commit(ctl->mdata, ctl->perf_changed);
ctl->perf_changed = 0;
}
if (mixer1_changed)
mdss_mdp_mixer_update(ctl->mixer_left);
if (mixer2_changed)
mdss_mdp_mixer_update(ctl->mixer_right);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, ctl->opmode);
ctl->flush_bits |= BIT(17); /* CTL */
if (sctl) {
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP,
sctl->opmode);
sctl->flush_bits |= BIT(17);
}
}
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
if (ctl->wait_pingpong)
ctl->wait_pingpong(ctl, NULL);
/* postprocessing setup, including dspp */
mdss_mdp_pp_setup_locked(ctl);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl->flush_bits);
if (sctl) {
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
sctl->flush_bits);
}
wmb();
ctl->flush_bits = 0;
if (ctl->display_fnc)
ret = ctl->display_fnc(ctl, arg); /* kickoff */
if (ret)
pr_warn("error displaying frame\n");
ctl->play_cnt++;
done:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&ctl->lock);
return ret;
}
void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
struct notifier_block *notifier)
{
blocking_notifier_chain_register(&ctl->notifier_head, notifier);
}
void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
struct notifier_block *notifier)
{
blocking_notifier_chain_unregister(&ctl->notifier_head, notifier);
}
int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event)
{
return blocking_notifier_call_chain(&ctl->notifier_head, event, ctl);
}
int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id)
{
int i;
struct mdss_mdp_ctl *ctl;
struct mdss_data_type *mdata;
u32 mixer_cnt = 0;
mutex_lock(&mdss_mdp_ctl_lock);
mdata = mdss_mdp_get_mdata();
for (i = 0; i < mdata->nctl; i++) {
ctl = mdata->ctl_off + i;
if ((ctl->power_on) && (ctl->mfd) &&
(ctl->mfd->index == fb_num)) {
if (ctl->mixer_left) {
mixer_id[mixer_cnt] = ctl->mixer_left->num;
mixer_cnt++;
}
if (mixer_cnt && ctl->mixer_right) {
mixer_id[mixer_cnt] = ctl->mixer_right->num;
mixer_cnt++;
}
if (mixer_cnt)
break;
}
}
mutex_unlock(&mdss_mdp_ctl_lock);
return mixer_cnt;
}
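/*
 * Illustrative usage sketch (not part of the original file): callers pass
 * an array large enough for both mixers of a control path and receive the
 * number of entries that were filled in.
 *
 *	u32 mixer_id[2];
 *	u32 cnt = mdss_mdp_get_ctl_mixers(fb_num, mixer_id);
 *	if (cnt)
 *		pr_debug("fb%d uses %u mixer(s), first=%u\n",
 *			 fb_num, cnt, mixer_id[0]);
 */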
/**
 * mdss_mdp_ctl_mixer_switch() - return ctl whose wb_type matches @return_type
* @ctl: Pointer to ctl structure to be switched.
* @return_type: wb_type of the ctl to be switched to.
*
* Virtual mixer switch should be performed only when there is no
* dedicated wfd block and writeback block is shared.
*/
struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
u32 return_type)
{
int i;
struct mdss_data_type *mdata = ctl->mdata;
if (ctl->wb_type == return_type) {
mdata->mixer_switched = false;
return ctl;
}
for (i = 0; i <= mdata->nctl; i++) {
if (mdata->ctl_off[i].wb_type == return_type) {
pr_debug("switching mixer from ctl=%d to ctl=%d\n",
ctl->num, mdata->ctl_off[i].num);
mdata->mixer_switched = true;
return mdata->ctl_off + i;
}
}
pr_err("unable to switch mixer to type=%d\n", return_type);
return NULL;
}
static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer)
{
if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
return MDSS_MDP_CTL_X_LAYER_5;
else
return MDSS_MDP_REG_CTL_LAYER(mixer->num);
} else {
return MDSS_MDP_REG_CTL_LAYER(mixer->num +
MDSS_MDP_INTF_LAYERMIXER3);
}
}
static int __mdss_mdp_mixer_handoff_helper(struct mdss_mdp_mixer *mixer,
struct mdss_mdp_pipe *pipe)
{
int rc = 0;
if (!mixer) {
rc = -EINVAL;
goto error;
}
if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] != NULL) {
pr_err("More than one pipe staged on mixer num %d\n",
mixer->num);
rc = -EINVAL;
goto error;
}
pr_debug("Staging pipe num %d on mixer num %d\n",
pipe->num, mixer->num);
mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] = pipe;
pipe->mixer = mixer;
pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
error:
return rc;
}
/**
* mdss_mdp_mixer_handoff() - Stages a given pipe on the appropriate mixer
* @ctl: pointer to the control structure associated with the overlay device.
* @num: the mixer number on which the pipe needs to be staged.
* @pipe: pointer to the pipe to be staged.
*
* Function stages a given pipe on either the left mixer or the right mixer
 * for the control structure based on the mixer number. If the input mixer
* number does not match either of the mixers then an error is returned.
* This function is called during overlay handoff when certain pipes are
* already staged by the bootloader.
*/
int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
struct mdss_mdp_pipe *pipe)
{
int rc = 0;
struct mdss_mdp_mixer *mx_left = ctl->mixer_left;
struct mdss_mdp_mixer *mx_right = ctl->mixer_right;
/*
* For performance calculations, stage the handed off pipe
* as MDSS_MDP_STAGE_UNUSED
*/
if (mx_left && (mx_left->num == num)) {
rc = __mdss_mdp_mixer_handoff_helper(mx_left, pipe);
} else if (mx_right && (mx_right->num == num)) {
rc = __mdss_mdp_mixer_handoff_helper(mx_right, pipe);
} else {
pr_err("pipe num %d staged on unallocated mixer num %d\n",
pipe->num, num);
rc = -EINVAL;
}
return rc;
}
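/*
 * Illustrative usage sketch (hypothetical caller): during splash-screen
 * handoff the overlay code walks the layer mixer state programmed by the
 * bootloader and re-stages each pipe it finds.
 *
 *	rc = mdss_mdp_mixer_handoff(ctl, mixer_num, pipe);
 *	if (rc)
 *		pr_err("handoff failed for pipe %d\n", pipe->num);
 */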
| gpl-2.0 |
OpenDMM/linux | drivers/acpi/parser/psxface.c | 11 | 11398 | /******************************************************************************
*
* Module Name: psxface - Parser external interfaces
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include <acpi/acparser.h>
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psxface")
/* Local Prototypes */
static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info);
static void
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
/*******************************************************************************
*
* FUNCTION: acpi_debug_trace
*
* PARAMETERS: method_name - Valid ACPI name string
* debug_level - Optional level mask. 0 to use default
* debug_layer - Optional layer mask. 0 to use default
 * Flags - bit 0: one shot(1) or persistent(0)
*
* RETURN: Status
*
* DESCRIPTION: External interface to enable debug tracing during control
* method execution
*
******************************************************************************/
acpi_status
acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
{
acpi_status status;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* TBDs: Validate name, allow full path or just nameseg */
acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
acpi_gbl_trace_flags = flags;
if (debug_level) {
acpi_gbl_trace_dbg_level = debug_level;
}
if (debug_layer) {
acpi_gbl_trace_dbg_layer = debug_layer;
}
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_OK);
}
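/*
 * Illustrative usage sketch (values are examples only): trace a single
 * execution of the method "_INI" with the default level and layer masks;
 * bit 0 of the flags selects one-shot tracing.
 *
 *	status = acpi_debug_trace("_INI", 0, 0, 1);
 */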
/*******************************************************************************
*
* FUNCTION: acpi_ps_start_trace
*
* PARAMETERS: Info - Method info struct
*
* RETURN: None
*
* DESCRIPTION: Start control method execution trace
*
******************************************************************************/
static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
ACPI_FUNCTION_ENTRY();
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return;
}
if ((!acpi_gbl_trace_method_name) ||
(acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
acpi_gbl_original_dbg_level = acpi_dbg_level;
acpi_gbl_original_dbg_layer = acpi_dbg_layer;
acpi_dbg_level = 0x00FFFFFF;
acpi_dbg_layer = ACPI_UINT32_MAX;
if (acpi_gbl_trace_dbg_level) {
acpi_dbg_level = acpi_gbl_trace_dbg_level;
}
if (acpi_gbl_trace_dbg_layer) {
acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
}
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_stop_trace
*
* PARAMETERS: Info - Method info struct
*
* RETURN: None
*
* DESCRIPTION: Stop control method execution trace
*
******************************************************************************/
static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
ACPI_FUNCTION_ENTRY();
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return;
}
if ((!acpi_gbl_trace_method_name) ||
(acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
/* Disable further tracing if type is one-shot */
if (acpi_gbl_trace_flags & 1) {
acpi_gbl_trace_method_name = 0;
acpi_gbl_trace_dbg_level = 0;
acpi_gbl_trace_dbg_layer = 0;
}
acpi_dbg_level = acpi_gbl_original_dbg_level;
acpi_dbg_layer = acpi_gbl_original_dbg_layer;
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_execute_method
*
* PARAMETERS: Info - Method info block, contains:
* Node - Method Node to execute
* obj_desc - Method object
* Parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
* parameter_type - Type of Parameter list
* pass_number - Parse or execute pass
*
* RETURN: Status
*
* DESCRIPTION: Execute a control method
*
******************************************************************************/
acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ps_execute_method);
/* Validate the Info and method Node */
if (!info || !info->resolved_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
}
/* Init for new method, wait on concurrency semaphore */
status =
acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
NULL);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* The caller "owns" the parameters, so give each one an extra
* reference
*/
acpi_ps_update_parameter_list(info, REF_INCREMENT);
/* Begin tracing if requested */
acpi_ps_start_trace(info);
/*
* 1) Perform the first pass parse of the method to enter any
* named objects that it creates into the namespace
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Parse **** Entry=%p obj=%p\n",
info->resolved_node, info->obj_desc));
info->pass_number = 1;
status = acpi_ps_execute_pass(info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/*
* 2) Execute the method. Performs second pass parse simultaneously
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Execution **** Entry=%p obj=%p\n",
info->resolved_node, info->obj_desc));
info->pass_number = 3;
status = acpi_ps_execute_pass(info);
cleanup:
/* End optional tracing */
acpi_ps_stop_trace(info);
/* Take away the extra reference that we gave the parameters above */
acpi_ps_update_parameter_list(info, REF_DECREMENT);
/* Exit now if error above */
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* If the method has returned an object, signal this to the caller with
* a control exception code
*/
if (info->return_object) {
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
info->return_object));
ACPI_DUMP_STACK_ENTRY(info->return_object);
status = AE_CTRL_RETURN_VALUE;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_update_parameter_list
*
* PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: parameter_type and Parameters)
* Action - Add or Remove reference
*
* RETURN: Status
*
* DESCRIPTION: Update reference count on all method parameter objects
*
******************************************************************************/
static void
acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
{
acpi_native_uint i;
if ((info->parameter_type == ACPI_PARAM_ARGS) && (info->parameters)) {
/* Update reference count for each parameter */
for (i = 0; info->parameters[i]; i++) {
/* Ignore errors, just do them all */
(void)acpi_ut_update_object_reference(info->
parameters[i],
action);
}
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_execute_pass
*
* PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: pass_number, Node, and obj_desc)
*
* RETURN: Status
*
* DESCRIPTION: Single AML pass: Parse or Execute a control method
*
******************************************************************************/
static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info)
{
acpi_status status;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE(ps_execute_pass);
/* Create and init a Root Node */
op = acpi_ps_create_scope_op();
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Create and initialize a new walk state */
walk_state =
acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
NULL, NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup;
}
status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
info->obj_desc->method.aml_start,
info->obj_desc->method.aml_length,
info->pass_number == 1 ? NULL : info,
info->pass_number);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
}
/* Parse the AML */
status = acpi_ps_parse_aml(walk_state);
/* Walk state was deleted by parse_aml */
cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
}
| gpl-2.0 |
chaodhib/TrinityCore | dep/jemalloc/src/base.c | 11 | 15501 | #define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/******************************************************************************/
/* Data. */
static base_t *b0;
metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
const char *metadata_thp_mode_names[] = {
"disabled",
"auto",
"always"
};
/******************************************************************************/
static inline bool
metadata_thp_madvise(void) {
return (metadata_thp_enabled() &&
(init_system_thp_mode == thp_mode_default));
}
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert(size == HUGEPAGE_CEILING(size));
size_t alignment = HUGEPAGE;
if (extent_hooks == &extent_hooks_default) {
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
} else {
/* No arena context as we are creating new arenas. */
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
&zero, &commit, ind);
post_reentrancy(tsd);
}
return addr;
}
static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
* stopping at first success. This cascade is performed for consistency
* with the cascade in extent_dalloc_wrapper() because an application's
* custom hooks may not support e.g. dalloc. This function is only ever
* called as a side effect of arena destruction, so although it might
* seem pointless to do anything besides dalloc here, the application
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
if (extent_hooks == &extent_hooks_default) {
if (!extent_dalloc_mmap(addr, size)) {
goto label_done;
}
if (!pages_decommit(addr, size)) {
goto label_done;
}
if (!pages_purge_forced(addr, size)) {
goto label_done;
}
if (!pages_purge_lazy(addr, size)) {
goto label_done;
}
/* Nothing worked. This should never happen. */
not_reached();
} else {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
pre_reentrancy(tsd, NULL);
if (extent_hooks->dalloc != NULL &&
!extent_hooks->dalloc(extent_hooks, addr, size, true,
ind)) {
goto label_post_reentrancy;
}
if (extent_hooks->decommit != NULL &&
!extent_hooks->decommit(extent_hooks, addr, size, 0, size,
ind)) {
goto label_post_reentrancy;
}
if (extent_hooks->purge_forced != NULL &&
!extent_hooks->purge_forced(extent_hooks, addr, size, 0,
size, ind)) {
goto label_post_reentrancy;
}
if (extent_hooks->purge_lazy != NULL &&
!extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
ind)) {
goto label_post_reentrancy;
}
/* Nothing worked. That's the application's problem. */
label_post_reentrancy:
post_reentrancy(tsd);
}
label_done:
if (metadata_thp_madvise()) {
/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(size & HUGEPAGE_MASK) == 0);
pages_nohuge(addr, size);
}
}
static void
base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
extent_binit(extent, addr, size, sn);
}
static size_t
base_get_num_blocks(base_t *base, bool with_new_block) {
base_block_t *b = base->blocks;
assert(b != NULL);
size_t n_blocks = with_new_block ? 2 : 1;
while (b->next != NULL) {
n_blocks++;
b = b->next;
}
return n_blocks;
}
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
assert(opt_metadata_thp == metadata_thp_auto);
malloc_mutex_assert_owner(tsdn, &base->mtx);
if (base->auto_thp_switched) {
return;
}
/* Called when adding a new block. */
bool should_switch;
if (base_ind_get(base) != 0) {
should_switch = (base_get_num_blocks(base, true) ==
BASE_AUTO_THP_THRESHOLD);
} else {
should_switch = (base_get_num_blocks(base, true) ==
BASE_AUTO_THP_THRESHOLD_A0);
}
if (!should_switch) {
return;
}
base->auto_thp_switched = true;
assert(!config_stats || base->n_thp == 0);
/* Make the initial blocks THP lazily. */
base_block_t *block = base->blocks;
while (block != NULL) {
assert((block->size & HUGEPAGE_MASK) == 0);
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
}
}
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
*gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
assert(extent_bsize_get(extent) >= *gap_size + size);
extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_bsize_get(extent) - *gap_size - size,
extent_sn_get(extent));
return ret;
}
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
void *addr, size_t size) {
if (extent_bsize_get(extent) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
sz_size2index(extent_bsize_get(extent) + 1) - 1;
extent_heap_insert(&base->avail[index_floor], extent);
}
if (config_stats) {
base->allocated += size;
/*
* Add one PAGE to base_resident for every page boundary that is
* crossed by the new allocation. Adjust n_thp similarly when
* metadata_thp is enabled.
*/
base->resident += PAGE_CEILING((uintptr_t)addr + size) -
PAGE_CEILING((uintptr_t)addr - gap_size);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
if (metadata_thp_madvise() && (opt_metadata_thp ==
metadata_thp_always || base->auto_thp_switched)) {
base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
- HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
LG_HUGEPAGE;
assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
}
}
}
static void *
base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
return ret;
}
/*
* Allocate a block of virtual memory that is large enough to start with a
* base_block_t header, followed by an object of specified size and alignment.
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t header_size = sizeof(base_block_t);
size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
header_size;
/*
* Create increasingly larger blocks in order to limit the total number
* of disjoint virtual memory ranges. Choose the next size in the page
* size class series (skipping size classes that are not a multiple of
* HUGEPAGE), or a size large enough to satisfy the requested size and
* alignment, whichever is larger.
*/
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize));
pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
*pind_last;
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
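	/*
	 * Worked example (illustrative, assuming a 2 MiB HUGEPAGE): a first
	 * request with usize = 16 KiB yields min_block_size =
	 * HUGEPAGE_CEILING(header_size + gap_size + 16 KiB) = 2 MiB, and
	 * later blocks follow the (huge-page rounded) page size class
	 * series, so the number of disjoint mappings grows slowly relative
	 * to the total metadata footprint.
	 */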
base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
block_size);
if (block == NULL) {
return NULL;
}
if (metadata_thp_madvise()) {
void *addr = (void *)block;
assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
(block_size & HUGEPAGE_MASK) == 0);
if (opt_metadata_thp == metadata_thp_always) {
pages_huge(addr, block_size);
} else if (opt_metadata_thp == metadata_thp_auto &&
base != NULL) {
/* base != NULL indicates this is not a new base. */
malloc_mutex_lock(tsdn, &base->mtx);
base_auto_thp_switch(tsdn, base);
if (base->auto_thp_switched) {
pages_huge(addr, block_size);
}
malloc_mutex_unlock(tsdn, &base->mtx);
}
}
*pind_last = sz_psz2ind(block_size);
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_extent_init(extent_sn_next, &block->extent,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
/*
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
static extent_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
if (block == NULL) {
return NULL;
}
block->next = base->blocks;
base->blocks = block;
if (config_stats) {
base->allocated += sizeof(base_block_t);
base->resident += PAGE_CEILING(sizeof(base_block_t));
base->mapped += block->size;
if (metadata_thp_madvise() &&
!(opt_metadata_thp == metadata_thp_auto
&& !base->auto_thp_switched)) {
assert(base->n_thp > 0);
base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
LG_HUGEPAGE;
}
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->extent;
}
base_t *
b0get(void) {
return b0;
}
base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
}
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
&gap_size, base_size, base_alignment);
base->ind = ind;
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
base_unmap(tsdn, extent_hooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
base->extent_sn_next = extent_sn_next;
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < NSIZES; i++) {
extent_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
base->mapped = block->size;
base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
>> LG_HUGEPAGE : 0;
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
base_size);
return base;
}
void
base_delete(tsdn_t *tsdn, base_t *base) {
extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
ATOMIC_ACQUIRE);
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
return old_extent_hooks;
}
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t *esn) {
alignment = QUANTUM_CEILING(alignment);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
break;
}
}
if (extent == NULL) {
/* Try to allocate more space. */
extent = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
if (extent == NULL) {
ret = NULL;
goto label_return;
}
ret = base_extent_bump_alloc(base, extent, usize, alignment);
if (esn != NULL) {
*esn = extent_sn_get(extent);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return ret;
}
/*
* base_alloc() returns zeroed memory, which is always demand-zeroed for the
* auto arenas, in order to make multi-page sparse data structures such as radix
* tree nodes efficient with respect to physical memory usage. Upon success a
* pointer to at least size bytes with specified alignment is returned. Note
* that size is rounded up to the nearest multiple of alignment to avoid false
* sharing.
*/
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
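/*
 * Illustrative usage sketch (not part of the original file): internal
 * metadata structures are typically carved out of a base with cacheline
 * alignment, e.g.
 *
 *	arena_t *arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t),
 *	    CACHELINE);
 *	if (arena == NULL)
 *		... propagate the allocation failure ...
 */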
extent_t *
base_alloc_extent(tsdn_t *tsdn, base_t *base) {
size_t esn;
extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
CACHELINE, &esn);
if (extent == NULL) {
return NULL;
}
extent_esn_set(extent, esn);
return extent;
}
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
size_t *mapped, size_t *n_thp) {
cassert(config_stats);
malloc_mutex_lock(tsdn, &base->mtx);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
*allocated = base->allocated;
*resident = base->resident;
*mapped = base->mapped;
*n_thp = base->n_thp;
malloc_mutex_unlock(tsdn, &base->mtx);
}
void
base_prefork(tsdn_t *tsdn, base_t *base) {
malloc_mutex_prefork(tsdn, &base->mtx);
}
void
base_postfork_parent(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
base_postfork_child(tsdn_t *tsdn, base_t *base) {
malloc_mutex_postfork_child(tsdn, &base->mtx);
}
bool
base_boot(tsdn_t *tsdn) {
b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
return (b0 == NULL);
}
| gpl-2.0 |
XXMrHyde/android_kernel_lge_hammerhead | drivers/gpu/ion/ion_cp_heap.c | 267 | 22135 | /*
* drivers/gpu/ion/ion_cp_heap.c
*
* Copyright (C) 2011 Google, Inc.
* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>
#include <asm/mach/map.h>
#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include "msm/ion_cp_common.h"
/**
* struct ion_cp_heap - container for the heap and shared heap data
* @heap: the heap information structure
* @pool: memory pool to allocate from.
* @base: the base address of the memory pool.
* @permission_type: Identifier for the memory used by SCM for protecting
* and unprotecting memory.
* @secure_base: Base address used when securing a heap that is shared.
* @secure_size: Size used when securing a heap that is shared.
* @lock: mutex to protect shared access.
* @heap_protected: Indicates whether heap has been protected or not.
* @allocated_bytes: the total number of allocated bytes from the pool.
* @total_size: the total size of the memory pool.
* @request_region: function pointer to call when first mapping of memory
* occurs.
* @release_region: function pointer to call when last mapping of memory
* unmapped.
* @bus_id: token used with request/release region.
* @kmap_cached_count: the total number of times this heap has been mapped in
* kernel space (cached).
* @kmap_uncached_count:the total number of times this heap has been mapped in
* kernel space (un-cached).
* @umap_count: the total number of times this heap has been mapped in
* user space.
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
*/
struct ion_cp_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
unsigned int permission_type;
ion_phys_addr_t secure_base;
size_t secure_size;
struct mutex lock;
unsigned int heap_protected;
unsigned long allocated_bytes;
unsigned long total_size;
int (*heap_request_region)(void *);
int (*heap_release_region)(void *);
void *bus_id;
unsigned long kmap_cached_count;
unsigned long kmap_uncached_count;
unsigned long umap_count;
unsigned int has_outer_cache;
atomic_t protect_cnt;
void *cpu_addr;
size_t heap_size;
dma_addr_t handle;
int cma;
int allow_non_secure_allocation;
};
enum {
HEAP_NOT_PROTECTED = 0,
HEAP_PROTECTED = 1,
};
#define DMA_ALLOC_TRIES 5
static int allocate_heap_memory(struct ion_heap *heap)
{
struct device *dev = heap->priv;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
int ret;
int tries = 0;
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
if (cp_heap->cpu_addr)
return 0;
while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
cp_heap->cpu_addr = dma_alloc_attrs(dev,
cp_heap->heap_size,
&(cp_heap->handle),
0,
&attrs);
if (!cp_heap->cpu_addr) {
trace_ion_cp_alloc_retry(tries);
msleep(20);
}
}
if (!cp_heap->cpu_addr)
goto out;
cp_heap->base = cp_heap->handle;
cp_heap->pool = gen_pool_create(12, -1);
if (!cp_heap->pool)
goto out_free;
ret = gen_pool_add(cp_heap->pool, cp_heap->base,
cp_heap->heap_size, -1);
if (ret < 0)
goto out_pool;
return 0;
out_pool:
gen_pool_destroy(cp_heap->pool);
out_free:
dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
cp_heap->handle);
out:
return ION_CP_ALLOCATE_FAIL;
}
static void free_heap_memory(struct ion_heap *heap)
{
struct device *dev = heap->priv;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
/* release memory */
dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
cp_heap->handle);
gen_pool_destroy(cp_heap->pool);
cp_heap->pool = NULL;
cp_heap->cpu_addr = 0;
}
/**
* Get the total number of kernel mappings.
* Must be called with heap->lock locked.
*/
static unsigned long ion_cp_get_total_kmap_count(
const struct ion_cp_heap *cp_heap)
{
return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}
static int ion_on_first_alloc(struct ion_heap *heap)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
int ret_value;
if (cp_heap->cma) {
ret_value = allocate_heap_memory(heap);
if (ret_value)
return 1;
}
return 0;
}
static void ion_on_last_free(struct ion_heap *heap)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
if (cp_heap->cma)
free_heap_memory(heap);
}
/**
 * Protects memory if the heap is an unsecured heap.
* Must be called with heap->lock locked.
*/
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
int ret_value = 0;
if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
/* Make sure we are in C state when the heap is protected. */
if (!cp_heap->allocated_bytes)
if (ion_on_first_alloc(heap))
goto out;
ret_value = ion_cp_protect_mem(cp_heap->secure_base,
cp_heap->secure_size, cp_heap->permission_type,
version, data);
if (ret_value) {
pr_err("Failed to protect memory for heap %s - "
"error code: %d\n", heap->name, ret_value);
if (!cp_heap->allocated_bytes)
ion_on_last_free(heap);
atomic_dec(&cp_heap->protect_cnt);
} else {
cp_heap->heap_protected = HEAP_PROTECTED;
pr_debug("Protected heap %s @ 0x%pa\n",
heap->name, &cp_heap->base);
}
}
out:
pr_debug("%s: protect count is %d\n", __func__,
atomic_read(&cp_heap->protect_cnt));
BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
return ret_value;
}
/**
 * Unprotects memory if the heap is a secure heap.
* Must be called with heap->lock locked.
*/
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
int error_code = ion_cp_unprotect_mem(
cp_heap->secure_base, cp_heap->secure_size,
cp_heap->permission_type, version, data);
if (error_code) {
pr_err("Failed to un-protect memory for heap %s - "
"error code: %d\n", heap->name, error_code);
} else {
cp_heap->heap_protected = HEAP_NOT_PROTECTED;
pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
(unsigned int) cp_heap->base);
if (!cp_heap->allocated_bytes)
ion_on_last_free(heap);
}
}
pr_debug("%s: protect count is %d\n", __func__,
atomic_read(&cp_heap->protect_cnt));
BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
unsigned long size,
unsigned long align,
unsigned long flags)
{
unsigned long offset;
unsigned long secure_allocation = flags & ION_FLAG_SECURE;
unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
mutex_unlock(&cp_heap->lock);
pr_err("ION cannot allocate un-secure memory from protected"
" heap %s\n", heap->name);
return ION_CP_ALLOCATE_FAIL;
}
if (!force_contig && !secure_allocation &&
!cp_heap->allow_non_secure_allocation) {
mutex_unlock(&cp_heap->lock);
pr_debug("%s: non-secure allocation disallowed from this heap\n",
__func__);
return ION_CP_ALLOCATE_FAIL;
}
/*
* The check above already checked for non-secure allocations when the
* heap is protected. HEAP_PROTECTED implies that this must be a secure
* allocation. If the heap is protected and there are userspace or
* cached kernel mappings, something has gone wrong in the security
* model.
*/
if (cp_heap->heap_protected == HEAP_PROTECTED) {
BUG_ON(cp_heap->umap_count != 0);
BUG_ON(cp_heap->kmap_cached_count != 0);
}
/*
* if this is the first reusable allocation, transition
* the heap
*/
if (!cp_heap->allocated_bytes)
if (ion_on_first_alloc(heap)) {
mutex_unlock(&cp_heap->lock);
return ION_RESERVED_ALLOCATE_FAIL;
}
cp_heap->allocated_bytes += size;
mutex_unlock(&cp_heap->lock);
offset = gen_pool_alloc_aligned(cp_heap->pool,
size, ilog2(align));
if (!offset) {
mutex_lock(&cp_heap->lock);
cp_heap->allocated_bytes -= size;
if ((cp_heap->total_size -
cp_heap->allocated_bytes) >= size)
pr_debug("%s: heap %s has enough memory (%lx) but"
" the allocation of size %lx still failed."
" Memory is probably fragmented.\n",
__func__, heap->name,
cp_heap->total_size -
cp_heap->allocated_bytes, size);
if (!cp_heap->allocated_bytes &&
cp_heap->heap_protected == HEAP_NOT_PROTECTED)
ion_on_last_free(heap);
mutex_unlock(&cp_heap->lock);
return ION_CP_ALLOCATE_FAIL;
}
return offset;
}
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
if (addr == ION_CP_ALLOCATE_FAIL)
return;
gen_pool_free(cp_heap->pool, addr, size);
mutex_lock(&cp_heap->lock);
cp_heap->allocated_bytes -= size;
if (!cp_heap->allocated_bytes &&
cp_heap->heap_protected == HEAP_NOT_PROTECTED)
ion_on_last_free(heap);
mutex_unlock(&cp_heap->lock);
}
static int ion_cp_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
struct ion_cp_buffer *buf = buffer->priv_virt;
*addr = buf->buffer;
*len = buffer->size;
return 0;
}
static int ion_cp_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
unsigned long flags)
{
struct ion_cp_buffer *buf;
phys_addr_t addr;
/*
* we never want Ion to fault pages in for us with this
* heap. We want to set up the mappings ourselves in .map_user
*/
flags |= ION_FLAG_CACHED_NEEDS_SYNC;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return ION_CP_ALLOCATE_FAIL;
addr = ion_cp_allocate(heap, size, align, flags);
if (addr == ION_CP_ALLOCATE_FAIL)
return -ENOMEM;
buf->buffer = addr;
buf->want_delayed_unsecure = 0;
atomic_set(&buf->secure_cnt, 0);
mutex_init(&buf->lock);
buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
buffer->priv_virt = buf;
return 0;
}
static void ion_cp_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct ion_cp_buffer *buf = buffer->priv_virt;
ion_cp_free(heap, buf->buffer, buffer->size);
WARN_ON(atomic_read(&buf->secure_cnt));
WARN_ON(atomic_read(&buf->map_cnt));
kfree(buf);
buffer->priv_virt = NULL;
}
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
size_t chunk_size = buffer->size;
struct ion_cp_buffer *buf = buffer->priv_virt;
if (ION_IS_CACHED(buffer->flags))
chunk_size = PAGE_SIZE;
else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
chunk_size = SZ_1M;
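	/*
	 * Illustrative outcome (assuming 4 KiB pages): a cached 1 MiB buffer
	 * is described as 256 PAGE_SIZE chunks, while an uncached secure
	 * 1 MiB buffer is described as a single SZ_1M chunk.
	 */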
return ion_create_chunked_sg_table(buf->buffer, chunk_size,
buffer->size);
}
struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return ion_cp_heap_create_sg_table(buffer);
}
void ion_cp_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
if (buffer->sg_table)
sg_free_table(buffer->sg_table);
kfree(buffer->sg_table);
buffer->sg_table = 0;
}
/**
 * Call request region for SMI memory if this is the first mapping.
*/
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
int ret_value = 0;
if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
if (cp_heap->heap_request_region)
ret_value = cp_heap->heap_request_region(
cp_heap->bus_id);
return ret_value;
}
/**
 * Call release region for SMI memory if this is the last un-mapping.
*/
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
int ret_value = 0;
if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
if (cp_heap->heap_release_region)
ret_value = cp_heap->heap_release_region(
cp_heap->bus_id);
return ret_value;
}
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
void *ret_value = NULL;
struct ion_cp_buffer *buf = buffer->priv_virt;
mutex_lock(&cp_heap->lock);
if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
((cp_heap->heap_protected == HEAP_PROTECTED) &&
!ION_IS_CACHED(buffer->flags))) {
if (ion_cp_request_region(cp_heap)) {
mutex_unlock(&cp_heap->lock);
return NULL;
}
if (cp_heap->cma) {
int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct page **pages = vmalloc(
sizeof(struct page *) * npages);
int i;
pgprot_t pgprot;
if (!pages) {
mutex_unlock(&cp_heap->lock);
return ERR_PTR(-ENOMEM);
}
if (ION_IS_CACHED(buffer->flags))
pgprot = PAGE_KERNEL;
else
pgprot = pgprot_writecombine(PAGE_KERNEL);
for (i = 0; i < npages; i++) {
pages[i] = phys_to_page(buf->buffer +
i * PAGE_SIZE);
}
ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
vfree(pages);
} else {
if (ION_IS_CACHED(buffer->flags))
ret_value = ioremap_cached(buf->buffer,
buffer->size);
else
ret_value = ioremap(buf->buffer,
buffer->size);
}
if (!ret_value) {
ion_cp_release_region(cp_heap);
} else {
if (ION_IS_CACHED(buffer->flags))
++cp_heap->kmap_cached_count;
else
++cp_heap->kmap_uncached_count;
atomic_inc(&buf->map_cnt);
}
}
mutex_unlock(&cp_heap->lock);
return ret_value;
}
void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
struct ion_cp_buffer *buf = buffer->priv_virt;
if (cp_heap->cma)
vunmap(buffer->vaddr);
else
__arm_iounmap(buffer->vaddr);
buffer->vaddr = NULL;
mutex_lock(&cp_heap->lock);
if (ION_IS_CACHED(buffer->flags))
--cp_heap->kmap_cached_count;
else
--cp_heap->kmap_uncached_count;
atomic_dec(&buf->map_cnt);
ion_cp_release_region(cp_heap);
mutex_unlock(&cp_heap->lock);
return;
}
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
int ret_value = -EAGAIN;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
struct ion_cp_buffer *buf = buffer->priv_virt;
mutex_lock(&cp_heap->lock);
if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
if (ion_cp_request_region(cp_heap)) {
mutex_unlock(&cp_heap->lock);
return -EINVAL;
}
if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(
vma->vm_page_prot);
ret_value = remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
if (ret_value) {
ion_cp_release_region(cp_heap);
} else {
atomic_inc(&buf->map_cnt);
++cp_heap->umap_count;
}
}
mutex_unlock(&cp_heap->lock);
return ret_value;
}
void ion_cp_heap_unmap_user(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
struct ion_cp_buffer *buf = buffer->priv_virt;
mutex_lock(&cp_heap->lock);
--cp_heap->umap_count;
atomic_dec(&buf->map_cnt);
ion_cp_release_region(cp_heap);
mutex_unlock(&cp_heap->lock);
}
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map)
{
unsigned long total_alloc;
unsigned long total_size;
unsigned long umap_count;
unsigned long kmap_count;
unsigned long heap_protected;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
total_alloc = cp_heap->allocated_bytes;
total_size = cp_heap->total_size;
umap_count = cp_heap->umap_count;
kmap_count = ion_cp_get_total_kmap_count(cp_heap);
heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
mutex_unlock(&cp_heap->lock);
seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
seq_printf(s, "total heap size: %lx\n", total_size);
seq_printf(s, "umapping count: %lx\n", umap_count);
seq_printf(s, "kmapping count: %lx\n", kmap_count);
seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
if (mem_map) {
unsigned long base = cp_heap->base;
unsigned long size = cp_heap->total_size;
unsigned long end = base+size;
unsigned long last_end = base;
struct rb_node *n;
seq_printf(s, "\nMemory Map\n");
seq_printf(s, "%16.s %14.s %14.s %14.s\n",
"client", "start address", "end address",
"size (hex)");
for (n = rb_first(mem_map); n; n = rb_next(n)) {
struct mem_map_data *data =
rb_entry(n, struct mem_map_data, node);
const char *client_name = "(null)";
if (last_end < data->addr) {
phys_addr_t da;
da = data->addr-1;
seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
"FREE", &last_end, &da,
data->addr-last_end,
data->addr-last_end);
}
if (data->client_name)
client_name = data->client_name;
seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
client_name, &data->addr,
&data->addr_end,
data->size, data->size);
last_end = data->addr_end+1;
}
if (last_end < end) {
seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
last_end, end-1, end-last_end, end-last_end);
}
}
return 0;
}
int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
int ret_value;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
ret_value = ion_cp_protect(heap, version, data);
} else {
pr_err("ION cannot secure heap with outstanding mappings: "
"User space: %lu, kernel space (cached): %lu\n",
cp_heap->umap_count, cp_heap->kmap_cached_count);
ret_value = -EINVAL;
}
mutex_unlock(&cp_heap->lock);
return ret_value;
}
int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
int ret_value = 0;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
ion_cp_unprotect(heap, version, data);
mutex_unlock(&cp_heap->lock);
return ret_value;
}
static struct ion_heap_ops cp_heap_ops = {
.allocate = ion_cp_heap_allocate,
.free = ion_cp_heap_free,
.phys = ion_cp_heap_phys,
.map_user = ion_cp_heap_map_user,
.unmap_user = ion_cp_heap_unmap_user,
.map_kernel = ion_cp_heap_map_kernel,
.unmap_kernel = ion_cp_heap_unmap_kernel,
.map_dma = ion_cp_heap_map_dma,
.unmap_dma = ion_cp_heap_unmap_dma,
.print_debug = ion_cp_print_debug,
.secure_heap = ion_cp_secure_heap,
.unsecure_heap = ion_cp_unsecure_heap,
.secure_buffer = ion_cp_secure_buffer,
.unsecure_buffer = ion_cp_unsecure_buffer,
};
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_cp_heap *cp_heap;
int ret;
cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
if (!cp_heap)
return ERR_PTR(-ENOMEM);
mutex_init(&cp_heap->lock);
cp_heap->allocated_bytes = 0;
cp_heap->umap_count = 0;
cp_heap->kmap_cached_count = 0;
cp_heap->kmap_uncached_count = 0;
cp_heap->total_size = heap_data->size;
cp_heap->heap.ops = &cp_heap_ops;
cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
cp_heap->heap_protected = HEAP_NOT_PROTECTED;
cp_heap->secure_base = heap_data->base;
cp_heap->secure_size = heap_data->size;
cp_heap->has_outer_cache = heap_data->has_outer_cache;
cp_heap->heap_size = heap_data->size;
atomic_set(&cp_heap->protect_cnt, 0);
if (heap_data->extra_data) {
struct ion_cp_heap_pdata *extra_data =
heap_data->extra_data;
cp_heap->permission_type = extra_data->permission_type;
if (extra_data->secure_size) {
cp_heap->secure_base = extra_data->secure_base;
cp_heap->secure_size = extra_data->secure_size;
}
if (extra_data->setup_region)
cp_heap->bus_id = extra_data->setup_region();
if (extra_data->request_region)
cp_heap->heap_request_region =
extra_data->request_region;
if (extra_data->release_region)
cp_heap->heap_release_region =
extra_data->release_region;
cp_heap->cma = extra_data->is_cma;
cp_heap->allow_non_secure_allocation =
extra_data->allow_nonsecure_alloc;
}
if (cp_heap->cma) {
cp_heap->pool = NULL;
cp_heap->cpu_addr = 0;
cp_heap->heap.priv = heap_data->priv;
} else {
cp_heap->pool = gen_pool_create(12, -1);
if (!cp_heap->pool)
goto free_heap;
cp_heap->base = heap_data->base;
ret = gen_pool_add(cp_heap->pool, cp_heap->base,
heap_data->size, -1);
if (ret < 0)
goto destroy_pool;
}
return &cp_heap->heap;
destroy_pool:
gen_pool_destroy(cp_heap->pool);
free_heap:
kfree(cp_heap);
return ERR_PTR(-ENOMEM);
}
void ion_cp_heap_destroy(struct ion_heap *heap)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
gen_pool_destroy(cp_heap->pool);
kfree(cp_heap);
cp_heap = NULL;
}
void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
*base = cp_heap->base;
*size = cp_heap->total_size;
}
| gpl-2.0 |
libos-nuse/net-next-nuse | sound/soc/samsung/h1940_uda1380.c | 523 | 6029 | /*
* h1940-uda1380.c -- ALSA Soc Audio Layer
*
* Copyright (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
* Copyright (c) 2010 Vasily Khoruzhick <anarsoul@gmail.com>
*
* Based on version from Arnaud Patard <arnaud.patard@rtp-net.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include "regs-iis.h"
#include <asm/mach-types.h>
#include <mach/gpio-samsung.h>
#include "s3c24xx-i2s.h"
static const unsigned int rates[] = {
11025,
22050,
44100,
};
static const struct snd_pcm_hw_constraint_list hw_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
};
static struct snd_soc_jack hp_jack;
static struct snd_soc_jack_pin hp_jack_pins[] = {
{
.pin = "Headphone Jack",
.mask = SND_JACK_HEADPHONE,
},
{
.pin = "Speaker",
.mask = SND_JACK_HEADPHONE,
.invert = 1,
},
};
static struct snd_soc_jack_gpio hp_jack_gpios[] = {
{
.gpio = S3C2410_GPG(4),
.name = "hp-gpio",
.report = SND_JACK_HEADPHONE,
.invert = 1,
.debounce_time = 200,
},
};
static int h1940_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
return snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&hw_rates);
}
static int h1940_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int div;
int ret;
unsigned int rate = params_rate(params);
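/*
* MCLK is configured for 384fs below; derive the prescaler by
* dividing the I2S clock rate by 384 * rate, rounding to the
* nearest integer.
*/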
switch (rate) {
case 11025:
case 22050:
case 44100:
div = s3c24xx_i2s_get_clockrate() / (384 * rate);
if (s3c24xx_i2s_get_clockrate() % (384 * rate) > (192 * rate))
div++;
break;
default:
dev_err(rtd->dev, "%s: rate %d is not supported\n",
__func__, rate);
return -EINVAL;
}
/* select clock source */
ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_PCLK, rate,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
/* set MCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
S3C2410_IISMOD_384FS);
if (ret < 0)
return ret;
/* set BCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
S3C2410_IISMOD_32FS);
if (ret < 0)
return ret;
/* set prescaler division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
S3C24XX_PRESCALE(div, div));
if (ret < 0)
return ret;
return 0;
}
static struct snd_soc_ops h1940_ops = {
.startup = h1940_startup,
.hw_params = h1940_hw_params,
};
static int h1940_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
if (SND_SOC_DAPM_EVENT_ON(event))
gpio_set_value(S3C_GPIO_END + 9, 1);
else
gpio_set_value(S3C_GPIO_END + 9, 0);
return 0;
}
/* h1940 machine dapm widgets */
static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
SND_SOC_DAPM_SPK("Speaker", h1940_spk_power),
};
/* h1940 machine audio_map */
static const struct snd_soc_dapm_route audio_map[] = {
/* headphone connected to VOUTLHP, VOUTRHP */
{"Headphone Jack", NULL, "VOUTLHP"},
{"Headphone Jack", NULL, "VOUTRHP"},
/* ext speaker connected to VOUTL, VOUTR */
{"Speaker", NULL, "VOUTL"},
{"Speaker", NULL, "VOUTR"},
/* mic is connected to VINM */
{"VINM", NULL, "Mic Jack"},
};
static struct platform_device *s3c24xx_snd_device;
static int h1940_uda1380_init(struct snd_soc_pcm_runtime *rtd)
{
snd_soc_card_jack_new(rtd->card, "Headphone Jack", SND_JACK_HEADPHONE,
&hp_jack, hp_jack_pins, ARRAY_SIZE(hp_jack_pins));
snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
hp_jack_gpios);
return 0;
}
static int h1940_uda1380_card_remove(struct snd_soc_card *card)
{
snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
hp_jack_gpios);
return 0;
}
/* s3c24xx digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link h1940_uda1380_dai[] = {
{
.name = "uda1380",
.stream_name = "UDA1380 Duplex",
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "uda1380-hifi",
.init = h1940_uda1380_init,
.platform_name = "s3c24xx-iis",
.codec_name = "uda1380-codec.0-001a",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &h1940_ops,
},
};
static struct snd_soc_card h1940_asoc = {
.name = "h1940",
.owner = THIS_MODULE,
.remove = h1940_uda1380_card_remove,
.dai_link = h1940_uda1380_dai,
.num_links = ARRAY_SIZE(h1940_uda1380_dai),
.dapm_widgets = uda1380_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(uda1380_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static int __init h1940_init(void)
{
int ret;
if (!machine_is_h1940())
return -ENODEV;
/* configure some gpios */
ret = gpio_request(S3C_GPIO_END + 9, "speaker-power");
if (ret)
goto err_out;
ret = gpio_direction_output(S3C_GPIO_END + 9, 0);
if (ret)
goto err_gpio;
s3c24xx_snd_device = platform_device_alloc("soc-audio", -1);
if (!s3c24xx_snd_device) {
ret = -ENOMEM;
goto err_gpio;
}
platform_set_drvdata(s3c24xx_snd_device, &h1940_asoc);
ret = platform_device_add(s3c24xx_snd_device);
if (ret)
goto err_plat;
return 0;
err_plat:
platform_device_put(s3c24xx_snd_device);
err_gpio:
gpio_free(S3C_GPIO_END + 9);
err_out:
return ret;
}
static void __exit h1940_exit(void)
{
platform_device_unregister(s3c24xx_snd_device);
gpio_free(S3C_GPIO_END + 9);
}
module_init(h1940_init);
module_exit(h1940_exit);
/* Module information */
MODULE_AUTHOR("Arnaud Patard, Vasily Khoruzhick");
MODULE_DESCRIPTION("ALSA SoC H1940");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sdfd/linux-socfpga | drivers/video/fbdev/vesafb.c | 523 | 15004 | /*
* framebuffer driver for VBE 2.0 compliant graphic boards
*
* switching to graphics mode happens at boot time (while
* running in real mode, see arch/i386/boot/video.S).
*
* (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <video/vga.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#define dac_reg (0x3c8)
#define dac_val (0x3c9)
/* --------------------------------------------------------------------- */
static struct fb_var_screeninfo vesafb_defined = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.right_margin = 32,
.upper_margin = 16,
.lower_margin = 4,
.vsync_len = 4,
.vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo vesafb_fix = {
.id = "VESA VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
};
static int inverse __read_mostly;
static int mtrr __read_mostly; /* disable mtrr */
static int vram_remap; /* Set amount of memory to be used */
static int vram_total; /* Set total amount of memory */
static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
static void (*pmi_start)(void) __read_mostly;
static void (*pmi_pal) (void) __read_mostly;
static int depth __read_mostly;
static int vga_compat __read_mostly;
/* --------------------------------------------------------------------- */
static int vesafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
#ifdef __i386__
int offset;
offset = (var->yoffset * info->fix.line_length + var->xoffset) / 4;
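/*
* Call the "set display start" service (VBE function 4F07h)
* through the protected mode interface entry point.
*/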
__asm__ __volatile__(
"call *(%%edi)"
: /* no return value */
: "a" (0x4f07), /* EAX */
"b" (0), /* EBX */
"c" (offset), /* ECX */
"d" (offset >> 16), /* EDX */
"D" (&pmi_start)); /* EDI */
#endif
return 0;
}
static int vesa_setpalette(int regno, unsigned red, unsigned green,
unsigned blue)
{
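/*
* The fb core passes 16-bit colour components; shift them down
* to the DAC width stored in 'depth'.
*/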
int shift = 16 - depth;
int err = -EINVAL;
/*
* Try VGA registers first...
*/
if (vga_compat) {
outb_p(regno, dac_reg);
outb_p(red >> shift, dac_val);
outb_p(green >> shift, dac_val);
outb_p(blue >> shift, dac_val);
err = 0;
}
#ifdef __i386__
/*
* Fallback to the PMI....
*/
if (err && pmi_setpal) {
struct { u_char blue, green, red, pad; } entry;
entry.red = red >> shift;
entry.green = green >> shift;
entry.blue = blue >> shift;
entry.pad = 0;
__asm__ __volatile__(
"call *(%%esi)"
: /* no return value */
: "a" (0x4f09), /* EAX */
"b" (0), /* EBX */
"c" (1), /* ECX */
"d" (regno), /* EDX */
"D" (&entry), /* EDI */
"S" (&pmi_pal)); /* ESI */
err = 0;
}
#endif
return err;
}
static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
int err = 0;
/*
* Set a single color register. The values supplied are
* already rounded down to the hardware's capabilities
* (according to the entries in the `var' structure). Return
* != 0 for invalid regno.
*/
if (regno >= info->cmap.len)
return 1;
if (info->var.bits_per_pixel == 8)
err = vesa_setpalette(regno,red,green,blue);
else if (regno < 16) {
switch (info->var.bits_per_pixel) {
case 16:
if (info->var.red.offset == 10) {
/* 1:5:5:5 */
((u32*) (info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
} else {
/* 0:5:6:5 */
((u32*) (info->pseudo_palette))[regno] =
((red & 0xf800) ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
}
break;
case 24:
case 32:
red >>= 8;
green >>= 8;
blue >>= 8;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
break;
}
}
return err;
}
static void vesafb_destroy(struct fb_info *info)
{
fb_dealloc_cmap(&info->cmap);
if (info->screen_base)
iounmap(info->screen_base);
release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
}
static struct fb_ops vesafb_ops = {
.owner = THIS_MODULE,
.fb_destroy = vesafb_destroy,
.fb_setcolreg = vesafb_setcolreg,
.fb_pan_display = vesafb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static int vesafb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
if (! strcmp(this_opt, "inverse"))
inverse=1;
else if (! strcmp(this_opt, "redraw"))
ypan=0;
else if (! strcmp(this_opt, "ypan"))
ypan=1;
else if (! strcmp(this_opt, "ywrap"))
ypan=2;
else if (! strcmp(this_opt, "vgapal"))
pmi_setpal=0;
else if (! strcmp(this_opt, "pmipal"))
pmi_setpal=1;
else if (! strncmp(this_opt, "mtrr:", 5))
mtrr = simple_strtoul(this_opt+5, NULL, 0);
else if (! strcmp(this_opt, "nomtrr"))
mtrr=0;
else if (! strncmp(this_opt, "vtotal:", 7))
vram_total = simple_strtoul(this_opt+7, NULL, 0);
else if (! strncmp(this_opt, "vremap:", 7))
vram_remap = simple_strtoul(this_opt+7, NULL, 0);
}
return 0;
}
static int vesafb_probe(struct platform_device *dev)
{
struct fb_info *info;
int i, err;
unsigned int size_vmode;
unsigned int size_remap;
unsigned int size_total;
char *option = NULL;
/* ignore error return of fb_get_options */
fb_get_options("vesafb", &option);
vesafb_setup(option);
if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
return -ENODEV;
vga_compat = (screen_info.capabilities & 2) ? 0 : 1;
vesafb_fix.smem_start = screen_info.lfb_base;
vesafb_defined.bits_per_pixel = screen_info.lfb_depth;
if (15 == vesafb_defined.bits_per_pixel)
vesafb_defined.bits_per_pixel = 16;
vesafb_defined.xres = screen_info.lfb_width;
vesafb_defined.yres = screen_info.lfb_height;
vesafb_fix.line_length = screen_info.lfb_linelength;
vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
/* size_vmode -- that is the amount of memory needed for the
* used video mode, i.e. the minimum amount of
* memory we need. */
size_vmode = vesafb_defined.yres * vesafb_fix.line_length;
/* size_total -- all video memory we have. Used for mtrr
* entries, resource allocation and bounds
* checking. */
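/* screen_info.lfb_size is reported in 64k blocks */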
size_total = screen_info.lfb_size * 65536;
if (vram_total)
size_total = vram_total * 1024 * 1024;
if (size_total < size_vmode)
size_total = size_vmode;
/* size_remap -- the amount of video memory we are going to
* use for vesafb. With modern cards it is not an
* option to simply use size_total as that
* wastes plenty of kernel address space. */
size_remap = size_vmode * 2;
if (vram_remap)
size_remap = vram_remap * 1024 * 1024;
if (size_remap < size_vmode)
size_remap = size_vmode;
if (size_remap > size_total)
size_remap = size_total;
vesafb_fix.smem_len = size_remap;
#ifndef __i386__
screen_info.vesapm_seg = 0;
#endif
if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
printk(KERN_WARNING
"vesafb: cannot reserve video memory at 0x%lx\n",
vesafb_fix.smem_start);
/* We cannot make this fatal. Sometimes this comes from magic
spaces our resource handlers simply don't know about */
}
info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
if (!info) {
release_mem_region(vesafb_fix.smem_start, size_total);
return -ENOMEM;
}
platform_set_drvdata(dev, info);
info->pseudo_palette = info->par;
info->par = NULL;
/* set vesafb aperture size for generic probing */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
err = -ENOMEM;
goto err;
}
info->apertures->ranges[0].base = screen_info.lfb_base;
info->apertures->ranges[0].size = size_total;
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
if (screen_info.vesapm_seg) {
printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
screen_info.vesapm_seg,screen_info.vesapm_off);
}
if (screen_info.vesapm_seg < 0xc000)
ypan = pmi_setpal = 0; /* not available or some DOS TSR ... */
if (ypan || pmi_setpal) {
unsigned short *pmi_base;
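/*
* The protected mode info block starts with 16-bit offsets to
* the "set window", "set display start" and "set palette"
* entry points; word 3, when non-zero, points to a list of
* ports and memory areas used by that code.
*/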
pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
if (pmi_base[3]) {
printk(KERN_INFO "vesafb: pmi: ports = ");
for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++)
printk("%x ",pmi_base[i]);
printk("\n");
if (pmi_base[i] != 0xffff) {
/*
* memory areas not supported (yet?)
*
* Rules are: we have to set up a descriptor for the requested
* memory area and pass it in the ES register to the BIOS function.
*/
printk(KERN_INFO "vesafb: can't handle memory requests, pmi disabled\n");
ypan = pmi_setpal = 0;
}
}
}
if (vesafb_defined.bits_per_pixel == 8 && !pmi_setpal && !vga_compat) {
printk(KERN_WARNING "vesafb: hardware palette is unchangeable,\n"
" colors may be incorrect\n");
vesafb_fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
}
vesafb_defined.xres_virtual = vesafb_defined.xres;
vesafb_defined.yres_virtual = vesafb_fix.smem_len / vesafb_fix.line_length;
if (ypan && vesafb_defined.yres_virtual > vesafb_defined.yres) {
printk(KERN_INFO "vesafb: scrolling: %s using protected mode interface, yres_virtual=%d\n",
(ypan > 1) ? "ywrap" : "ypan",vesafb_defined.yres_virtual);
} else {
printk(KERN_INFO "vesafb: scrolling: redraw\n");
vesafb_defined.yres_virtual = vesafb_defined.yres;
ypan = 0;
}
/* some dummy values for timing to make fbset happy */
vesafb_defined.pixclock = 10000000 / vesafb_defined.xres * 1000 / vesafb_defined.yres;
vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8;
vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8;
vesafb_defined.red.offset = screen_info.red_pos;
vesafb_defined.red.length = screen_info.red_size;
vesafb_defined.green.offset = screen_info.green_pos;
vesafb_defined.green.length = screen_info.green_size;
vesafb_defined.blue.offset = screen_info.blue_pos;
vesafb_defined.blue.length = screen_info.blue_size;
vesafb_defined.transp.offset = screen_info.rsvd_pos;
vesafb_defined.transp.length = screen_info.rsvd_size;
if (vesafb_defined.bits_per_pixel <= 8) {
depth = vesafb_defined.green.length;
vesafb_defined.red.length =
vesafb_defined.green.length =
vesafb_defined.blue.length =
vesafb_defined.bits_per_pixel;
}
printk(KERN_INFO "vesafb: %s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
(vesafb_defined.bits_per_pixel > 8) ?
"Truecolor" : (vga_compat || pmi_setpal) ?
"Pseudocolor" : "Static Pseudocolor",
screen_info.rsvd_size,
screen_info.red_size,
screen_info.green_size,
screen_info.blue_size,
screen_info.rsvd_pos,
screen_info.red_pos,
screen_info.green_pos,
screen_info.blue_pos);
vesafb_fix.ypanstep = ypan ? 1 : 0;
vesafb_fix.ywrapstep = (ypan>1) ? 1 : 0;
/* request failure does not faze us, as vgacon probably has this
* region already (FIXME) */
request_region(0x3c0, 32, "vesafb");
#ifdef CONFIG_MTRR
if (mtrr) {
unsigned int temp_size = size_total;
unsigned int type = 0;
switch (mtrr) {
case 1:
type = MTRR_TYPE_UNCACHABLE;
break;
case 2:
type = MTRR_TYPE_WRBACK;
break;
case 3:
type = MTRR_TYPE_WRCOMB;
break;
case 4:
type = MTRR_TYPE_WRTHROUGH;
break;
default:
type = 0;
break;
}
if (type) {
int rc;
/* Find the largest power-of-two */
temp_size = roundup_pow_of_two(temp_size);
/* Try and find a power of two to add */
do {
rc = mtrr_add(vesafb_fix.smem_start, temp_size,
type, 1);
temp_size >>= 1;
} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
}
}
#endif
switch (mtrr) {
case 1: /* uncachable */
info->screen_base = ioremap_nocache(vesafb_fix.smem_start, vesafb_fix.smem_len);
break;
case 2: /* write-back */
info->screen_base = ioremap_cache(vesafb_fix.smem_start, vesafb_fix.smem_len);
break;
case 3: /* write-combining */
info->screen_base = ioremap_wc(vesafb_fix.smem_start, vesafb_fix.smem_len);
break;
case 4: /* write-through */
default:
info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
break;
}
if (!info->screen_base) {
printk(KERN_ERR
"vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
vesafb_fix.smem_len, vesafb_fix.smem_start);
err = -EIO;
goto err;
}
printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, "
"using %dk, total %dk\n",
vesafb_fix.smem_start, info->screen_base,
size_remap/1024, size_total/1024);
info->fbops = &vesafb_ops;
info->var = vesafb_defined;
info->fix = vesafb_fix;
info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
(ypan ? FBINFO_HWACCEL_YPAN : 0);
if (!ypan)
info->fbops->fb_pan_display = NULL;
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENOMEM;
goto err;
}
if (register_framebuffer(info)<0) {
err = -EINVAL;
fb_dealloc_cmap(&info->cmap);
goto err;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
err:
if (info->screen_base)
iounmap(info->screen_base);
framebuffer_release(info);
release_mem_region(vesafb_fix.smem_start, size_total);
return err;
}
static int vesafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
unregister_framebuffer(info);
framebuffer_release(info);
return 0;
}
static struct platform_driver vesafb_driver = {
.driver = {
.name = "vesa-framebuffer",
.owner = THIS_MODULE,
},
.probe = vesafb_probe,
.remove = vesafb_remove,
};
module_platform_driver(vesafb_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
poondog/KANGAROO-kernel | drivers/net/wireless/rt2x00/rt73usb.c | 523 | 77733 | /*
Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
<http://rt2x00.serialmonkey.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
Module: rt73usb
Abstract: rt73usb device specific routines.
Supported chipsets: rt2571W & rt2671.
*/
#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "rt2x00.h"
#include "rt2x00usb.h"
#include "rt73usb.h"
/*
* Allow hardware encryption to be disabled.
*/
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
/*
* Register access.
* All access to the CSR registers will go through the methods
* rt2x00usb_register_read and rt2x00usb_register_write.
* BBP and RF register require indirect register access,
* and use the CSR registers BBPCSR and RFCSR to achieve this.
* These indirect registers work with busy bits,
* and we will try a maximum of REGISTER_BUSY_COUNT times to access
* the register while taking a REGISTER_BUSY_DELAY us delay
* between each attempt. When the busy bit is still set at that time,
* the access attempt is considered to have failed,
* and we will print an error.
* The _lock versions must be used if you already hold the csr_mutex
*/
#define WAIT_FOR_BBP(__dev, __reg) \
rt2x00usb_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg))
#define WAIT_FOR_RF(__dev, __reg) \
rt2x00usb_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg))
static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, const u8 value)
{
u32 reg;
mutex_lock(&rt2x00dev->csr_mutex);
/*
* Wait until the BBP becomes available, afterwards we
* can safely write the new data into the register.
*/
if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
reg = 0;
rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value);
rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0);
rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg);
}
mutex_unlock(&rt2x00dev->csr_mutex);
}
static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
const unsigned int word, u8 *value)
{
u32 reg;
mutex_lock(&rt2x00dev->csr_mutex);
/*
* Wait until the BBP becomes available, afterwards we
* can safely write the read request into the register.
* After the data has been written, we wait until hardware
* returns the correct value, if at any time the register
* doesn't become available in time, reg will be 0xffffffff
* which means we return 0xff to the caller.
*/
if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
reg = 0;
rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1);
rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg);
WAIT_FOR_BBP(rt2x00dev, &reg);
}
*value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
mutex_unlock(&rt2x00dev->csr_mutex);
}
static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, const u32 value)
{
u32 reg;
mutex_lock(&rt2x00dev->csr_mutex);
/*
* Wait until the RF becomes available, afterwards we
* can safely write the new data into the register.
*/
if (WAIT_FOR_RF(rt2x00dev, &reg)) {
reg = 0;
rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value);
/*
* RF5225 and RF2527 contain 21 bits per RF register value,
* all others contain 20 bits.
*/
rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
20 + (rt2x00_rf(rt2x00dev, RF5225) ||
rt2x00_rf(rt2x00dev, RF2527)));
rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR4, reg);
rt2x00_rf_write(rt2x00dev, word, value);
}
mutex_unlock(&rt2x00dev->csr_mutex);
}
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
static const struct rt2x00debug rt73usb_rt2x00debug = {
.owner = THIS_MODULE,
.csr = {
.read = rt2x00usb_register_read,
.write = rt2x00usb_register_write,
.flags = RT2X00DEBUGFS_OFFSET,
.word_base = CSR_REG_BASE,
.word_size = sizeof(u32),
.word_count = CSR_REG_SIZE / sizeof(u32),
},
.eeprom = {
.read = rt2x00_eeprom_read,
.write = rt2x00_eeprom_write,
.word_base = EEPROM_BASE,
.word_size = sizeof(u16),
.word_count = EEPROM_SIZE / sizeof(u16),
},
.bbp = {
.read = rt73usb_bbp_read,
.write = rt73usb_bbp_write,
.word_base = BBP_BASE,
.word_size = sizeof(u8),
.word_count = BBP_SIZE / sizeof(u8),
},
.rf = {
.read = rt2x00_rf_read,
.write = rt73usb_rf_write,
.word_base = RF_BASE,
.word_size = sizeof(u32),
.word_count = RF_SIZE / sizeof(u32),
},
};
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
return rt2x00_get_field32(reg, MAC_CSR13_BIT7);
}
#ifdef CONFIG_RT2X00_LIB_LEDS
static void rt73usb_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct rt2x00_led *led =
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int a_mode =
(enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
unsigned int bg_mode =
(enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
if (led->type == LED_TYPE_RADIO) {
rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
MCU_LEDCS_RADIO_STATUS, enabled);
rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL,
0, led->rt2x00dev->led_mcu_reg,
REGISTER_TIMEOUT);
} else if (led->type == LED_TYPE_ASSOC) {
rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
MCU_LEDCS_LINK_BG_STATUS, bg_mode);
rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
MCU_LEDCS_LINK_A_STATUS, a_mode);
rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL,
0, led->rt2x00dev->led_mcu_reg,
REGISTER_TIMEOUT);
} else if (led->type == LED_TYPE_QUALITY) {
/*
* The brightness is divided into 6 levels (0 - 5),
* this means we need to convert the brightness
* argument into the matching level within that range.
*/
rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL,
brightness / (LED_FULL / 6),
led->rt2x00dev->led_mcu_reg,
REGISTER_TIMEOUT);
}
}
static int rt73usb_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
struct rt2x00_led *led =
container_of(led_cdev, struct rt2x00_led, led_dev);
u32 reg;
rt2x00usb_register_read(led->rt2x00dev, MAC_CSR14, &reg);
rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off);
rt2x00usb_register_write(led->rt2x00dev, MAC_CSR14, reg);
return 0;
}
static void rt73usb_init_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led,
enum led_type type)
{
led->rt2x00dev = rt2x00dev;
led->type = type;
led->led_dev.brightness_set = rt73usb_brightness_set;
led->led_dev.blink_set = rt73usb_blink_set;
led->flags = LED_INITIALIZED;
}
#endif /* CONFIG_RT2X00_LIB_LEDS */
/*
* Configuration handlers.
*/
static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
struct hw_key_entry key_entry;
struct rt2x00_field32 field;
u32 mask;
u32 reg;
if (crypto->cmd == SET_KEY) {
/*
* rt2x00lib can't determine the correct free
* key_idx for shared keys. We have 1 register
* with key valid bits. The goal is simple, read
* the register, if that is full we have no slots
* left.
* Note that each BSS is allowed to have up to 4
* shared keys, so put a mask over the allowed
* entries.
*/
mask = (0xf << crypto->bssidx);
rt2x00usb_register_read(rt2x00dev, SEC_CSR0, &reg);
reg &= mask;
if (reg && reg == mask)
return -ENOSPC;
key->hw_key_idx += reg ? ffz(reg) : 0;
/*
* Upload key to hardware
*/
memcpy(key_entry.key, crypto->key,
sizeof(key_entry.key));
memcpy(key_entry.tx_mic, crypto->tx_mic,
sizeof(key_entry.tx_mic));
memcpy(key_entry.rx_mic, crypto->rx_mic,
sizeof(key_entry.rx_mic));
reg = SHARED_KEY_ENTRY(key->hw_key_idx);
rt2x00usb_register_multiwrite(rt2x00dev, reg,
&key_entry, sizeof(key_entry));
/*
* The cipher types are stored over 2 registers.
* bssidx 0 and 1 keys are stored in SEC_CSR1 and
* bssidx 1 and 2 keys are stored in SEC_CSR5.
* Using the FIELD32() defines directly would cause overhead,
* so just calculate the correct offset.
*/
if (key->hw_key_idx < 8) {
field.bit_offset = (3 * key->hw_key_idx);
field.bit_mask = 0x7 << field.bit_offset;
rt2x00usb_register_read(rt2x00dev, SEC_CSR1, &reg);
rt2x00_set_field32(&reg, field, crypto->cipher);
rt2x00usb_register_write(rt2x00dev, SEC_CSR1, reg);
} else {
field.bit_offset = (3 * (key->hw_key_idx - 8));
field.bit_mask = 0x7 << field.bit_offset;
rt2x00usb_register_read(rt2x00dev, SEC_CSR5, &reg);
rt2x00_set_field32(&reg, field, crypto->cipher);
rt2x00usb_register_write(rt2x00dev, SEC_CSR5, reg);
}
/*
* The driver does not support the IV/EIV generation
* in hardware. However it doesn't support the IV/EIV
* inside the ieee80211 frame either, but requires it
* to be provided separately for the descriptor.
* rt2x00lib will cut the IV/EIV data out of all frames
* given to us by mac80211, but we must tell mac80211
* to generate the IV/EIV data.
*/
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
}
/*
* SEC_CSR0 contains only single-bit fields to indicate
* a particular key is valid. Because using the FIELD32()
* defines directly will cause a lot of overhead we use
* a calculation to determine the correct bit directly.
*/
mask = 1 << key->hw_key_idx;
rt2x00usb_register_read(rt2x00dev, SEC_CSR0, &reg);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
reg &= ~mask;
rt2x00usb_register_write(rt2x00dev, SEC_CSR0, reg);
return 0;
}
static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
struct hw_pairwise_ta_entry addr_entry;
struct hw_key_entry key_entry;
u32 mask;
u32 reg;
if (crypto->cmd == SET_KEY) {
/*
* rt2x00lib can't determine the correct free
* key_idx for pairwise keys. We have 2 registers
* with key valid bits. The goal is simple, read
* the first register, if that is full move to
* the next register.
* When both registers are full, we drop the key,
* otherwise we use the first invalid entry.
*/
rt2x00usb_register_read(rt2x00dev, SEC_CSR2, &reg);
if (reg && reg == ~0) {
key->hw_key_idx = 32;
rt2x00usb_register_read(rt2x00dev, SEC_CSR3, &reg);
if (reg && reg == ~0)
return -ENOSPC;
}
key->hw_key_idx += reg ? ffz(reg) : 0;
/*
* Upload key to hardware
*/
memcpy(key_entry.key, crypto->key,
sizeof(key_entry.key));
memcpy(key_entry.tx_mic, crypto->tx_mic,
sizeof(key_entry.tx_mic));
memcpy(key_entry.rx_mic, crypto->rx_mic,
sizeof(key_entry.rx_mic));
reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
rt2x00usb_register_multiwrite(rt2x00dev, reg,
&key_entry, sizeof(key_entry));
/*
* Send the address and cipher type to the hardware register.
*/
memset(&addr_entry, 0, sizeof(addr_entry));
memcpy(&addr_entry, crypto->address, ETH_ALEN);
addr_entry.cipher = crypto->cipher;
reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
rt2x00usb_register_multiwrite(rt2x00dev, reg,
&addr_entry, sizeof(addr_entry));
/*
* Enable pairwise lookup table for given BSS idx,
* without this received frames will not be decrypted
* by the hardware.
*/
rt2x00usb_register_read(rt2x00dev, SEC_CSR4, &reg);
reg |= (1 << crypto->bssidx);
rt2x00usb_register_write(rt2x00dev, SEC_CSR4, reg);
/*
* The driver does not support the IV/EIV generation
* in hardware. However it doesn't support the IV/EIV
* inside the ieee80211 frame either, but requires it
* to be provided separately for the descriptor.
* rt2x00lib will cut the IV/EIV data out of all frames
* given to us by mac80211, but we must tell mac80211
* to generate the IV/EIV data.
*/
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
}
/*
* SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
* a particular key is valid. Because using the FIELD32()
* defines directly will cause a lot of overhead we use
* a calculation to determine the correct bit directly.
*/
if (key->hw_key_idx < 32) {
mask = 1 << key->hw_key_idx;
rt2x00usb_register_read(rt2x00dev, SEC_CSR2, &reg);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
reg &= ~mask;
rt2x00usb_register_write(rt2x00dev, SEC_CSR2, reg);
} else {
mask = 1 << (key->hw_key_idx - 32);
rt2x00usb_register_read(rt2x00dev, SEC_CSR3, &reg);
if (crypto->cmd == SET_KEY)
reg |= mask;
else if (crypto->cmd == DISABLE_KEY)
reg &= ~mask;
rt2x00usb_register_write(rt2x00dev, SEC_CSR3, reg);
}
return 0;
}
static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
const unsigned int filter_flags)
{
u32 reg;
/*
* Start configuration steps.
* Note that the version error will always be dropped
* and broadcast frames will always be accepted since
* there is no filter for it at this time.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
!(filter_flags & FIF_PROMISC_IN_BSS));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
!(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
!(filter_flags & FIF_ALLMULTI));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS,
!(filter_flags & FIF_CONTROL));
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
}
static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00_intf *intf,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
* Enable synchronisation.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
if (flags & CONFIG_UPDATE_MAC) {
reg = le32_to_cpu(conf->mac[1]);
rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff);
conf->mac[1] = cpu_to_le32(reg);
rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR2,
conf->mac, sizeof(conf->mac));
}
if (flags & CONFIG_UPDATE_BSSID) {
reg = le32_to_cpu(conf->bssid[1]);
rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3);
conf->bssid[1] = cpu_to_le32(reg);
rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR4,
conf->bssid, sizeof(conf->bssid));
}
}
static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_erp *erp,
u32 changed)
{
u32 reg;
rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32);
rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
!!erp->short_preamble);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
}
if (changed & BSS_CHANGED_BASIC_RATES)
rt2x00usb_register_write(rt2x00dev, TXRX_CSR5,
erp->basic_rates);
if (changed & BSS_CHANGED_BEACON_INT) {
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
erp->beacon_int * 16);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
rt2x00usb_register_read(rt2x00dev, MAC_CSR8, &reg);
rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg);
}
}
static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
struct antenna_setup *ant)
{
u8 r3;
u8 r4;
u8 r77;
u8 temp;
rt73usb_bbp_read(rt2x00dev, 3, &r3);
rt73usb_bbp_read(rt2x00dev, 4, &r4);
rt73usb_bbp_read(rt2x00dev, 77, &r77);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
/*
* Configure the RX antenna.
*/
switch (ant->rx) {
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
&& (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
break;
case ANTENNA_A:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
break;
case ANTENNA_B:
default:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
break;
}
rt73usb_bbp_write(rt2x00dev, 77, r77);
rt73usb_bbp_write(rt2x00dev, 3, r3);
rt73usb_bbp_write(rt2x00dev, 4, r4);
}
static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
struct antenna_setup *ant)
{
u8 r3;
u8 r4;
u8 r77;
rt73usb_bbp_read(rt2x00dev, 3, &r3);
rt73usb_bbp_read(rt2x00dev, 4, &r4);
rt73usb_bbp_read(rt2x00dev, 77, &r77);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
!test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
/*
* Configure the RX antenna.
*/
switch (ant->rx) {
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
break;
case ANTENNA_A:
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
break;
case ANTENNA_B:
default:
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
break;
}
rt73usb_bbp_write(rt2x00dev, 77, r77);
rt73usb_bbp_write(rt2x00dev, 3, r3);
rt73usb_bbp_write(rt2x00dev, 4, r4);
}
struct antenna_sel {
u8 word;
/*
* value[0] -> non-LNA
* value[1] -> LNA
*/
u8 value[2];
};
static const struct antenna_sel antenna_sel_a[] = {
{ 96, { 0x58, 0x78 } },
{ 104, { 0x38, 0x48 } },
{ 75, { 0xfe, 0x80 } },
{ 86, { 0xfe, 0x80 } },
{ 88, { 0xfe, 0x80 } },
{ 35, { 0x60, 0x60 } },
{ 97, { 0x58, 0x58 } },
{ 98, { 0x58, 0x58 } },
};
static const struct antenna_sel antenna_sel_bg[] = {
{ 96, { 0x48, 0x68 } },
{ 104, { 0x2c, 0x3c } },
{ 75, { 0xfe, 0x80 } },
{ 86, { 0xfe, 0x80 } },
{ 88, { 0xfe, 0x80 } },
{ 35, { 0x50, 0x50 } },
{ 97, { 0x48, 0x48 } },
{ 98, { 0x48, 0x48 } },
};
static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
struct antenna_setup *ant)
{
const struct antenna_sel *sel;
unsigned int lna;
unsigned int i;
u32 reg;
/*
* We should never come here because rt2x00lib is supposed
* to catch this and send us the correct antenna explicitly.
*/
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
sel = antenna_sel_a;
lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
} else {
sel = antenna_sel_bg;
lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
}
for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]);
rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
(rt2x00dev->curr_band == IEEE80211_BAND_2GHZ));
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
(rt2x00dev->curr_band == IEEE80211_BAND_5GHZ));
rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
rt73usb_config_antenna_5x(rt2x00dev, ant);
else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
rt73usb_config_antenna_2x(rt2x00dev, ant);
}
static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
u16 eeprom;
short lna_gain = 0;
if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) {
if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
lna_gain += 14;
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
} else {
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
}
rt2x00dev->lna_gain = lna_gain;
}
static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
struct rf_channel *rf, const int txpower)
{
u8 r3;
u8 r94;
u8 smart;
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt73usb_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
rt73usb_bbp_write(rt2x00dev, 3, r3);
r94 = 6;
if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94))
r94 += txpower - MAX_TXPOWER;
else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94))
r94 += txpower;
rt73usb_bbp_write(rt2x00dev, 94, r94);
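/*
* Program the RF registers three times, toggling bit 2 of rf3
* off, on and off again so the synthesizer latches the new
* channel settings.
*/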
rt73usb_rf_write(rt2x00dev, 1, rf->rf1);
rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
rt73usb_rf_write(rt2x00dev, 1, rf->rf1);
rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
rt73usb_rf_write(rt2x00dev, 1, rf->rf1);
rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
udelay(10);
}
static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev,
const int txpower)
{
struct rf_channel rf;
rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
rt73usb_config_channel(rt2x00dev, &rf, txpower);
}
static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
u32 reg;
rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
libconf->conf->long_frame_max_tx_count);
rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
libconf->conf->short_frame_max_tx_count);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
}
static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf)
{
enum dev_state state =
(libconf->conf->flags & IEEE80211_CONF_PS) ?
STATE_SLEEP : STATE_AWAKE;
u32 reg;
if (state == STATE_SLEEP) {
rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN,
rt2x00dev->beacon_int - 10);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP,
libconf->conf->listen_interval - 1);
rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5);
/* We must first disable autowake before it can be enabled */
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1);
rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_SLEEP, REGISTER_TIMEOUT);
} else {
rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_WAKEUP, REGISTER_TIMEOUT);
}
}
static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_conf *libconf,
const unsigned int flags)
{
/* Always recalculate LNA gain before changing configuration */
rt73usb_config_lna_gain(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
rt73usb_config_channel(rt2x00dev, &libconf->rf,
libconf->conf->power_level);
if ((flags & IEEE80211_CONF_CHANGE_POWER) &&
!(flags & IEEE80211_CONF_CHANGE_CHANNEL))
rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt73usb_config_retry_limit(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
rt73usb_config_ps(rt2x00dev, libconf);
}
/*
* Link tuning
*/
static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual)
{
u32 reg;
/*
* Update FCS error count from register.
*/
rt2x00usb_register_read(rt2x00dev, STA_CSR0, &reg);
qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR);
/*
* Update False CCA count from register.
*/
rt2x00usb_register_read(rt2x00dev, STA_CSR1, &reg);
qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR);
}
static inline void rt73usb_set_vgc(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, u8 vgc_level)
{
if (qual->vgc_level != vgc_level) {
rt73usb_bbp_write(rt2x00dev, 17, vgc_level);
qual->vgc_level = vgc_level;
qual->vgc_level_reg = vgc_level;
}
}
static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual)
{
rt73usb_set_vgc(rt2x00dev, qual, 0x20);
}
static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, const u32 count)
{
u8 up_bound;
u8 low_bound;
/*
* Determine r17 bounds.
*/
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
low_bound += 0x10;
up_bound += 0x10;
}
} else {
if (qual->rssi > -82) {
low_bound = 0x1c;
up_bound = 0x40;
} else if (qual->rssi > -84) {
low_bound = 0x1c;
up_bound = 0x20;
} else {
low_bound = 0x1c;
up_bound = 0x1c;
}
if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
low_bound += 0x14;
up_bound += 0x10;
}
}
/*
* If we are not associated, we should go straight to the
* dynamic CCA tuning.
*/
if (!rt2x00dev->intf_associated)
goto dynamic_cca_tune;
/*
* Special big-R17 for very short distance
*/
if (qual->rssi > -35) {
rt73usb_set_vgc(rt2x00dev, qual, 0x60);
return;
}
/*
* Special big-R17 for short distance
*/
if (qual->rssi >= -58) {
rt73usb_set_vgc(rt2x00dev, qual, up_bound);
return;
}
/*
* Special big-R17 for middle-short distance
*/
if (qual->rssi >= -66) {
rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x10);
return;
}
/*
* Special mid-R17 for middle distance
*/
if (qual->rssi >= -74) {
rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x08);
return;
}
/*
* Special case: Change up_bound based on the rssi.
* Lower up_bound when rssi is weaker than -74 dBm.
*/
up_bound -= 2 * (-74 - qual->rssi);
if (low_bound > up_bound)
up_bound = low_bound;
if (qual->vgc_level > up_bound) {
rt73usb_set_vgc(rt2x00dev, qual, up_bound);
return;
}
dynamic_cca_tune:
/*
* r17 does not yet exceed upper limit, continue and base
* the r17 tuning on the false CCA count.
*/
if ((qual->false_cca > 512) && (qual->vgc_level < up_bound))
rt73usb_set_vgc(rt2x00dev, qual,
min_t(u8, qual->vgc_level + 4, up_bound));
else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound))
rt73usb_set_vgc(rt2x00dev, qual,
max_t(u8, qual->vgc_level - 4, low_bound));
}
/*
* Queue handlers.
*/
static void rt73usb_start_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
switch (queue->qid) {
case QID_RX:
rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
break;
default:
break;
}
}
static void rt73usb_stop_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
switch (queue->qid) {
case QID_RX:
rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
break;
default:
break;
}
}
/*
* Firmware functions
*/
static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
return FIRMWARE_RT2571;
}
static int rt73usb_check_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len)
{
u16 fw_crc;
u16 crc;
/*
* Only support 2kb firmware files.
*/
if (len != 2048)
return FW_BAD_LENGTH;
/*
* The last 2 bytes in the firmware array are the crc checksum itself,
* this means that we should never pass those 2 bytes to the crc
* algorithm.
*/
fw_crc = (data[len - 2] << 8 | data[len - 1]);
/*
* Use the crc itu-t algorithm.
*/
crc = crc_itu_t(0, data, len - 2);
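/*
* Feed two zero bytes into the CRC so the result matches a
* checksum that was generated with the CRC field cleared.
*/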
crc = crc_itu_t_byte(crc, 0);
crc = crc_itu_t_byte(crc, 0);
return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
}
static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len)
{
unsigned int i;
int status;
u32 reg;
/*
* Wait for stable hardware.
*/
for (i = 0; i < 100; i++) {
rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
if (reg)
break;
msleep(1);
}
if (!reg) {
ERROR(rt2x00dev, "Unstable hardware.\n");
return -EBUSY;
}
/*
* Write firmware to device.
*/
rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len);
/*
* Send firmware request to device to load firmware,
* we need to specify a long timeout time.
*/
status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE,
0, USB_MODE_FIRMWARE,
REGISTER_TIMEOUT_FIRMWARE);
if (status < 0) {
ERROR(rt2x00dev, "Failed to write Firmware to device.\n");
return status;
}
return 0;
}
/*
* Initialization functions.
*/
static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
rt2x00usb_register_read(rt2x00dev, TXRX_CSR1, &reg);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */
rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR1, reg);
/*
* CCK TXD BBP registers
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2, 11);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10);
rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR2, reg);
/*
* OFDM TXD BBP registers
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR3, &reg);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5);
rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR3, reg);
rt2x00usb_register_read(rt2x00dev, TXRX_CSR7, &reg);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49);
rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR7, reg);
rt2x00usb_register_read(rt2x00dev, TXRX_CSR8, &reg);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42);
rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR8, reg);
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
rt2x00usb_register_read(rt2x00dev, MAC_CSR6, &reg);
rt2x00_set_field32(&reg, MAC_CSR6_MAX_FRAME_UNIT, 0xfff);
rt2x00usb_register_write(rt2x00dev, MAC_CSR6, reg);
rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00000718);
if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
return -EBUSY;
rt2x00usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00);
/*
* Invalidate all Shared Keys (SEC_CSR0),
* and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5)
*/
rt2x00usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000);
rt2x00usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000);
rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
reg = 0x000023b0;
if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
rt2x00usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06);
rt2x00usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
rt2x00usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408);
rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
/*
* Clear all beacons
* For the Beacon base registers we only need to clear
* the first byte since that byte contains the VALID and OWNER
* bits which (when set to 0) will invalidate the entire beacon.
*/
rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
/*
* We must clear the error counters.
* These registers are cleared on read,
* so we may pass a useless variable to store the value.
*/
rt2x00usb_register_read(rt2x00dev, STA_CSR0, &reg);
rt2x00usb_register_read(rt2x00dev, STA_CSR1, &reg);
rt2x00usb_register_read(rt2x00dev, STA_CSR2, &reg);
/*
* Reset MAC and BBP registers.
*/
rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1);
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1);
rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0);
rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1);
rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
return 0;
}
static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
{
unsigned int i;
u8 value;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt73usb_bbp_read(rt2x00dev, 0, &value);
if ((value != 0xff) && (value != 0x00))
return 0;
udelay(REGISTER_BUSY_DELAY);
}
ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
return -EACCES;
}
static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev)
{
unsigned int i;
u16 eeprom;
u8 reg_id;
u8 value;
if (unlikely(rt73usb_wait_bbp_ready(rt2x00dev)))
return -EACCES;
rt73usb_bbp_write(rt2x00dev, 3, 0x80);
rt73usb_bbp_write(rt2x00dev, 15, 0x30);
rt73usb_bbp_write(rt2x00dev, 21, 0xc8);
rt73usb_bbp_write(rt2x00dev, 22, 0x38);
rt73usb_bbp_write(rt2x00dev, 23, 0x06);
rt73usb_bbp_write(rt2x00dev, 24, 0xfe);
rt73usb_bbp_write(rt2x00dev, 25, 0x0a);
rt73usb_bbp_write(rt2x00dev, 26, 0x0d);
rt73usb_bbp_write(rt2x00dev, 32, 0x0b);
rt73usb_bbp_write(rt2x00dev, 34, 0x12);
rt73usb_bbp_write(rt2x00dev, 37, 0x07);
rt73usb_bbp_write(rt2x00dev, 39, 0xf8);
rt73usb_bbp_write(rt2x00dev, 41, 0x60);
rt73usb_bbp_write(rt2x00dev, 53, 0x10);
rt73usb_bbp_write(rt2x00dev, 54, 0x18);
rt73usb_bbp_write(rt2x00dev, 60, 0x10);
rt73usb_bbp_write(rt2x00dev, 61, 0x04);
rt73usb_bbp_write(rt2x00dev, 62, 0x04);
rt73usb_bbp_write(rt2x00dev, 75, 0xfe);
rt73usb_bbp_write(rt2x00dev, 86, 0xfe);
rt73usb_bbp_write(rt2x00dev, 88, 0xfe);
rt73usb_bbp_write(rt2x00dev, 90, 0x0f);
rt73usb_bbp_write(rt2x00dev, 99, 0x00);
rt73usb_bbp_write(rt2x00dev, 102, 0x16);
rt73usb_bbp_write(rt2x00dev, 107, 0x04);
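/*
* Apply the BBP register overrides stored in the EEPROM on top
* of the defaults written above.
*/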
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
rt73usb_bbp_write(rt2x00dev, reg_id, value);
}
}
return 0;
}
/*
* Device state switch handlers.
*/
static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
/*
* Initialize all registers.
*/
if (unlikely(rt73usb_init_registers(rt2x00dev) ||
rt73usb_init_bbp(rt2x00dev)))
return -EIO;
return 0;
}
static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818);
/*
* Disable synchronisation.
*/
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, 0);
rt2x00usb_disable_radio(rt2x00dev);
}
static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
{
u32 reg, reg2;
unsigned int i;
char put_to_sleep;
put_to_sleep = (state != STATE_AWAKE);
rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg);
rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep);
rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep);
rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg);
/*
* Device is not guaranteed to be in the requested state yet.
* We must wait until the register indicates that the
* device has entered the correct state.
*/
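/*
* The BBP current-state field is expected to read 1 once the device is
* awake and 0 once it sleeps, so the loop waits until it equals
* !put_to_sleep and rewrites MAC_CSR12 on every retry.
*/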
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg2);
state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
if (state == !put_to_sleep)
return 0;
rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg);
msleep(10);
}
return -EBUSY;
}
static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
int retval = 0;
switch (state) {
case STATE_RADIO_ON:
retval = rt73usb_enable_radio(rt2x00dev);
break;
case STATE_RADIO_OFF:
rt73usb_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
case STATE_RADIO_IRQ_OFF:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
case STATE_SLEEP:
case STATE_STANDBY:
case STATE_AWAKE:
retval = rt73usb_set_state(rt2x00dev, state);
break;
default:
retval = -ENOTSUPP;
break;
}
if (unlikely(retval))
ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n",
state, retval);
return retval;
}
/*
* TX descriptor initialization
*/
static void rt73usb_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *txd = (__le32 *) entry->skb->data;
u32 word;
/*
* Start writing the descriptor words.
*/
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_ACK,
test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_BURST2,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
rt2x00_desc_write(txd, 0, word);
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
_rt2x00_desc_write(txd, 3, skbdesc->iv[0]);
_rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
}
rt2x00_desc_read(txd, 5, &word);
rt2x00_set_field32(&word, TXD_W5_TX_POWER,
TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
/*
* Register descriptor details in skb frame descriptor.
*/
skbdesc->flags |= SKBDESC_DESC_IN_SKB;
skbdesc->desc = txd;
skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
static void rt73usb_write_beacon(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
unsigned int beacon_base;
unsigned int padding_len;
u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
orig_reg = reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
/*
* Add space for the descriptor in front of the skb.
*/
skb_push(entry->skb, TXD_DESC_SIZE);
memset(entry->skb->data, 0, TXD_DESC_SIZE);
/*
* Write the TX descriptor for the beacon.
*/
rt73usb_write_tx_desc(entry, txdesc);
/*
* Dump beacon to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
* Write entire beacon with descriptor and padding to register.
*/
padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
if (padding_len && skb_pad(entry->skb, padding_len)) {
ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
/* skb freed by skb_pad() on failure */
entry->skb = NULL;
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
return;
}
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
entry->skb->len + padding_len);
/*
* Enable beaconing again.
*
* For Wi-Fi, beacons are generated fairly between participating stations.
* Set TBTT phase adaptive adjustment step to 8us (default 16us)
*/
rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
/*
* Clean up the beacon skb.
*/
dev_kfree_skb(entry->skb);
entry->skb = NULL;
}
static void rt73usb_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
unsigned int beacon_base;
u32 reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
/*
* Clear beacon.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
/*
* Enable beaconing again.
*/
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
static int rt73usb_get_tx_data_len(struct queue_entry *entry)
{
int length;
/*
* The length _must_ be a multiple of 4,
* but it must _not_ be a multiple of the USB packet size.
*/
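/*
* Concretely: round the frame length up to a multiple of 4, then add 4
* more bytes only when that result lands exactly on a USB max-packet
* boundary, so the bulk transfer never ends on a full packet.
*/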
length = roundup(entry->skb->len, 4);
length += (4 * !(length % entry->queue->usb_maxpacket));
return length;
}
/*
* RX control handlers
*/
static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
{
u8 offset = rt2x00dev->lna_gain;
u8 lna;
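/*
* The RX descriptor reports which LNA setting was active for this frame;
* each setting maps to a fixed attenuation that is added to the device's
* lna_gain and later subtracted from twice the AGC reading.
*/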
lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA);
switch (lna) {
case 3:
offset += 90;
break;
case 2:
offset += 74;
break;
case 1:
offset += 64;
break;
default:
return 0;
}
if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
if (lna == 3 || lna == 2)
offset += 10;
} else {
if (lna == 3)
offset += 6;
else if (lna == 2)
offset += 8;
}
}
return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
}
static void rt73usb_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *rxd = (__le32 *)entry->skb->data;
u32 word0;
u32 word1;
/*
* Copy the descriptor to the skbdesc->desc buffer so it stays valid when
* rt2x00usb moves the frame data around.
*/
memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
rxd = (__le32 *)skbdesc->desc;
/*
* It is now safe to read the descriptor on all architectures.
*/
rt2x00_desc_read(rxd, 0, &word0);
rt2x00_desc_read(rxd, 1, &word1);
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
_rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]);
rxdesc->dev_flags |= RXDONE_CRYPTO_IV;
_rt2x00_desc_read(rxd, 4, &rxdesc->icv);
rxdesc->dev_flags |= RXDONE_CRYPTO_ICV;
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. It has provided the data separately but rt2x00lib
* should decide if it should be reinserted.
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
/*
* The hardware has already checked the Michael Mic and has
* stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
/*
* Obtain the status about this packet.
* When frame was received with an OFDM bitrate,
* the signal is the PLCP value. If it was received with
* a CCK bitrate the signal is the rate in 100kbit/s.
*/
rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
rxdesc->rssi = rt73usb_agc_to_rssi(rt2x00dev, word1);
rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
if (rt2x00_get_field32(word0, RXD_W0_OFDM))
rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
else
rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
/*
* Set skb pointers, and update frame information.
*/
skb_pull(entry->skb, entry->queue->desc_size);
skb_trim(entry->skb, rxdesc->size);
}
/*
* Device probe functions.
*/
static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
u16 word;
u8 *mac;
s8 value;
rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE);
/*
* Start validation of the data that has been read.
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
random_ether_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2);
rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT,
ANTENNA_B);
rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT,
ANTENNA_B);
rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0);
rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0);
rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0);
rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226);
rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_G, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_A, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_ACT, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_0, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_1, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_2, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_3, 0);
rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_4, 0);
rt2x00_set_field16(&word, EEPROM_LED_LED_MODE,
LED_MODE_DEFAULT);
rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word);
EEPROM(rt2x00dev, "Led: 0x%04x\n", word);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0);
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word);
} else {
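/*
* The EEPROM word was programmed; sanity-check the two calibration
* offsets and treat anything outside the -10..+10 range as corrupt,
* resetting it to 0 before writing the word back.
*/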
value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1);
if (value < -10 || value > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0);
value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2);
if (value < -10 || value > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
EEPROM(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word);
} else {
value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1);
if (value < -10 || value > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2);
if (value < -10 || value > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
}
return 0;
}
static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 value;
u16 eeprom;
/*
* Read EEPROM word for configuration.
*/
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
/*
* Identify RF chipset.
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
if (!rt2x00_rf(rt2x00dev, RF5226) &&
!rt2x00_rf(rt2x00dev, RF2528) &&
!rt2x00_rf(rt2x00dev, RF5225) &&
!rt2x00_rf(rt2x00dev, RF2527)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
/*
* Identify default antenna configuration.
*/
rt2x00dev->default_ant.tx =
rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT);
rt2x00dev->default_ant.rx =
rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT);
/*
* Read the Frame type.
*/
if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE))
__set_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags);
/*
* Detect if this device has a hardware controlled radio.
*/
if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
/*
* Read frequency offset.
*/
rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
/*
* Read external LNA information.
*/
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) {
__set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
__set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
}
/*
* Store led settings, for correct led behaviour.
*/
#ifdef CONFIG_RT2X00_LIB_LEDS
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
rt73usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
if (value == LED_MODE_SIGNAL_STRENGTH)
rt73usb_init_led(rt2x00dev, &rt2x00dev->led_qual,
LED_TYPE_QUALITY);
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value);
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0,
rt2x00_get_field16(eeprom,
EEPROM_LED_POLARITY_GPIO_0));
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1,
rt2x00_get_field16(eeprom,
EEPROM_LED_POLARITY_GPIO_1));
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2,
rt2x00_get_field16(eeprom,
EEPROM_LED_POLARITY_GPIO_2));
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3,
rt2x00_get_field16(eeprom,
EEPROM_LED_POLARITY_GPIO_3));
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4,
rt2x00_get_field16(eeprom,
EEPROM_LED_POLARITY_GPIO_4));
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT,
rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT));
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG,
rt2x00_get_field16(eeprom,
EEPROM_LED_POLARITY_RDY_G));
rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
rt2x00_get_field16(eeprom,
EEPROM_LED_POLARITY_RDY_A));
#endif /* CONFIG_RT2X00_LIB_LEDS */
return 0;
}
/*
* RF value list for RF2528
* Supports: 2.4 GHz
*/
static const struct rf_channel rf_vals_bg_2528[] = {
{ 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b },
{ 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f },
{ 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b },
{ 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f },
{ 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b },
{ 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f },
{ 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b },
{ 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f },
{ 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b },
{ 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f },
{ 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b },
{ 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f },
{ 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b },
{ 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 },
};
/*
* RF value list for RF5226
* Supports: 2.4 GHz & 5.2 GHz
*/
static const struct rf_channel rf_vals_5226[] = {
{ 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b },
{ 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f },
{ 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b },
{ 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f },
{ 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b },
{ 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f },
{ 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b },
{ 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f },
{ 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b },
{ 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f },
{ 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b },
{ 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f },
{ 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b },
{ 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 },
/* 802.11 UNI / HyperLan 2 */
{ 36, 0x00002c0c, 0x0000099a, 0x00098255, 0x000fea23 },
{ 40, 0x00002c0c, 0x000009a2, 0x00098255, 0x000fea03 },
{ 44, 0x00002c0c, 0x000009a6, 0x00098255, 0x000fea0b },
{ 48, 0x00002c0c, 0x000009aa, 0x00098255, 0x000fea13 },
{ 52, 0x00002c0c, 0x000009ae, 0x00098255, 0x000fea1b },
{ 56, 0x00002c0c, 0x000009b2, 0x00098255, 0x000fea23 },
{ 60, 0x00002c0c, 0x000009ba, 0x00098255, 0x000fea03 },
{ 64, 0x00002c0c, 0x000009be, 0x00098255, 0x000fea0b },
/* 802.11 HyperLan 2 */
{ 100, 0x00002c0c, 0x00000a2a, 0x000b8255, 0x000fea03 },
{ 104, 0x00002c0c, 0x00000a2e, 0x000b8255, 0x000fea0b },
{ 108, 0x00002c0c, 0x00000a32, 0x000b8255, 0x000fea13 },
{ 112, 0x00002c0c, 0x00000a36, 0x000b8255, 0x000fea1b },
{ 116, 0x00002c0c, 0x00000a3a, 0x000b8255, 0x000fea23 },
{ 120, 0x00002c0c, 0x00000a82, 0x000b8255, 0x000fea03 },
{ 124, 0x00002c0c, 0x00000a86, 0x000b8255, 0x000fea0b },
{ 128, 0x00002c0c, 0x00000a8a, 0x000b8255, 0x000fea13 },
{ 132, 0x00002c0c, 0x00000a8e, 0x000b8255, 0x000fea1b },
{ 136, 0x00002c0c, 0x00000a92, 0x000b8255, 0x000fea23 },
/* 802.11 UNII */
{ 140, 0x00002c0c, 0x00000a9a, 0x000b8255, 0x000fea03 },
{ 149, 0x00002c0c, 0x00000aa2, 0x000b8255, 0x000fea1f },
{ 153, 0x00002c0c, 0x00000aa6, 0x000b8255, 0x000fea27 },
{ 157, 0x00002c0c, 0x00000aae, 0x000b8255, 0x000fea07 },
{ 161, 0x00002c0c, 0x00000ab2, 0x000b8255, 0x000fea0f },
{ 165, 0x00002c0c, 0x00000ab6, 0x000b8255, 0x000fea17 },
/* MMAC(Japan)J52 ch 34,38,42,46 */
{ 34, 0x00002c0c, 0x0008099a, 0x000da255, 0x000d3a0b },
{ 38, 0x00002c0c, 0x0008099e, 0x000da255, 0x000d3a13 },
{ 42, 0x00002c0c, 0x000809a2, 0x000da255, 0x000d3a1b },
{ 46, 0x00002c0c, 0x000809a6, 0x000da255, 0x000d3a23 },
};
/*
* RF value list for RF5225 & RF2527
* Supports: 2.4 GHz & 5.2 GHz
*/
static const struct rf_channel rf_vals_5225_2527[] = {
{ 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b },
{ 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f },
{ 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b },
{ 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f },
{ 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b },
{ 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f },
{ 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b },
{ 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f },
{ 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b },
{ 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f },
{ 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b },
{ 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f },
{ 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b },
{ 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 },
/* 802.11 UNI / HyperLan 2 */
{ 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 },
{ 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 },
{ 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b },
{ 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 },
{ 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b },
{ 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 },
{ 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 },
{ 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b },
/* 802.11 HyperLan 2 */
{ 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 },
{ 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b },
{ 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 },
{ 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b },
{ 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 },
{ 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 },
{ 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b },
{ 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 },
{ 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b },
{ 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 },
/* 802.11 UNII */
{ 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 },
{ 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f },
{ 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 },
{ 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 },
{ 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f },
{ 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 },
/* MMAC(Japan)J52 ch 34,38,42,46 */
{ 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b },
{ 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 },
{ 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b },
{ 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 },
};
static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *tx_power;
unsigned int i;
/*
* Initialize all hw fields.
*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
* capable of sending the buffered frames out after the DTIM
* transmission using rt2x00lib_beacondone. This will send out
* multicast and broadcast traffic immediately instead of buffering it
* infinitely and thus dropping it after some time.
*/
rt2x00dev->hw->flags =
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK;
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2x00_eeprom_addr(rt2x00dev,
EEPROM_MAC_ADDR_0));
/*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
if (rt2x00_rf(rt2x00dev, RF2528)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
spec->channels = rf_vals_bg_2528;
} else if (rt2x00_rf(rt2x00dev, RF5226)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5226);
spec->channels = rf_vals_5226;
} else if (rt2x00_rf(rt2x00dev, RF2527)) {
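/* RF2527 is 2.4 GHz only: reuse the combined table but stop after ch 14 */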
spec->num_channels = 14;
spec->channels = rf_vals_5225_2527;
} else if (rt2x00_rf(rt2x00dev, RF5225)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
spec->channels = rf_vals_5225_2527;
}
/*
* Create channel information array
*/
info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
for (i = 0; i < 14; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
if (spec->num_channels > 14) {
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
for (i = 14; i < spec->num_channels; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 =
TXPOWER_FROM_DEV(tx_power[i - 14]);
}
}
return 0;
}
static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
u32 reg;
/*
* Allocate eeprom data.
*/
retval = rt73usb_validate_eeprom(rt2x00dev);
if (retval)
return retval;
retval = rt73usb_init_eeprom(rt2x00dev);
if (retval)
return retval;
/*
* Enable rfkill polling by setting GPIO direction of the
* rfkill switch GPIO pin correctly.
*/
rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
/*
* Initialize hw specifications.
*/
retval = rt73usb_probe_hw_mode(rt2x00dev);
if (retval)
return retval;
/*
* This device has multiple filters for control frames,
* but has no separate filter for PS Poll frames.
*/
__set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
/*
* This device requires firmware.
*/
__set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
if (!modparam_nohwcrypt)
__set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
__set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
__set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
/*
* Set the rssi offset.
*/
rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
return 0;
}
/*
* IEEE80211 stack callback functions.
*/
static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
const struct ieee80211_tx_queue_params *params)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
struct rt2x00_field32 field;
int retval;
u32 reg;
u32 offset;
/*
* First pass the configuration through rt2x00lib, that will
* update the queue settings and validate the input. After that
* we are free to update the registers based on the value
* in the queue parameter.
*/
retval = rt2x00mac_conf_tx(hw, queue_idx, params);
if (retval)
return retval;
/*
* We only need to perform additional register initialization
* for WMM queues.
*/
if (queue_idx >= 4)
return 0;
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
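/*
* Each AC_TXOP_CSR register packs two 16-bit TXOP values, so queues 0/1
* live in AC_TXOP_CSR0 and queues 2/3 in AC_TXOP_CSR1; the field built
* below selects the lower or upper halfword for this queue.
*/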
offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
field.bit_offset = (queue_idx & 1) * 16;
field.bit_mask = 0xffff << field.bit_offset;
rt2x00usb_register_read(rt2x00dev, offset, &reg);
rt2x00_set_field32(&reg, field, queue->txop);
rt2x00usb_register_write(rt2x00dev, offset, reg);
/* Update WMM registers */
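/*
* AIFSN_CSR, CWMIN_CSR and CWMAX_CSR each hold one 4-bit value per
* queue, so the same 4-bit field at offset (queue_idx * 4) is reused
* for all three registers below.
*/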
field.bit_offset = queue_idx * 4;
field.bit_mask = 0xf << field.bit_offset;
rt2x00usb_register_read(rt2x00dev, AIFSN_CSR, &reg);
rt2x00_set_field32(&reg, field, queue->aifs);
rt2x00usb_register_write(rt2x00dev, AIFSN_CSR, reg);
rt2x00usb_register_read(rt2x00dev, CWMIN_CSR, &reg);
rt2x00_set_field32(&reg, field, queue->cw_min);
rt2x00usb_register_write(rt2x00dev, CWMIN_CSR, reg);
rt2x00usb_register_read(rt2x00dev, CWMAX_CSR, &reg);
rt2x00_set_field32(&reg, field, queue->cw_max);
rt2x00usb_register_write(rt2x00dev, CWMAX_CSR, reg);
return 0;
}
static u64 rt73usb_get_tsf(struct ieee80211_hw *hw)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
u64 tsf;
u32 reg;
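/*
* The 64-bit TSF is exposed through two 32-bit registers: TXRX_CSR13
* carries the upper word and TXRX_CSR12 the lower word.
*/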
rt2x00usb_register_read(rt2x00dev, TXRX_CSR13, &reg);
tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32;
rt2x00usb_register_read(rt2x00dev, TXRX_CSR12, &reg);
tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER);
return tsf;
}
static const struct ieee80211_ops rt73usb_mac80211_ops = {
.tx = rt2x00mac_tx,
.start = rt2x00mac_start,
.stop = rt2x00mac_stop,
.add_interface = rt2x00mac_add_interface,
.remove_interface = rt2x00mac_remove_interface,
.config = rt2x00mac_config,
.configure_filter = rt2x00mac_configure_filter,
.set_tim = rt2x00mac_set_tim,
.set_key = rt2x00mac_set_key,
.sw_scan_start = rt2x00mac_sw_scan_start,
.sw_scan_complete = rt2x00mac_sw_scan_complete,
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt73usb_conf_tx,
.get_tsf = rt73usb_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
.flush = rt2x00mac_flush,
.set_antenna = rt2x00mac_set_antenna,
.get_antenna = rt2x00mac_get_antenna,
.get_ringparam = rt2x00mac_get_ringparam,
};
static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.probe_hw = rt73usb_probe_hw,
.get_firmware_name = rt73usb_get_firmware_name,
.check_firmware = rt73usb_check_firmware,
.load_firmware = rt73usb_load_firmware,
.initialize = rt2x00usb_initialize,
.uninitialize = rt2x00usb_uninitialize,
.clear_entry = rt2x00usb_clear_entry,
.set_device_state = rt73usb_set_device_state,
.rfkill_poll = rt73usb_rfkill_poll,
.link_stats = rt73usb_link_stats,
.reset_tuner = rt73usb_reset_tuner,
.link_tuner = rt73usb_link_tuner,
.watchdog = rt2x00usb_watchdog,
.start_queue = rt73usb_start_queue,
.kick_queue = rt2x00usb_kick_queue,
.stop_queue = rt73usb_stop_queue,
.flush_queue = rt2x00usb_flush_queue,
.write_tx_desc = rt73usb_write_tx_desc,
.write_beacon = rt73usb_write_beacon,
.clear_beacon = rt73usb_clear_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
.config_pairwise_key = rt73usb_config_pairwise_key,
.config_filter = rt73usb_config_filter,
.config_intf = rt73usb_config_intf,
.config_erp = rt73usb_config_erp,
.config_ant = rt73usb_config_ant,
.config = rt73usb_config,
};
static const struct data_queue_desc rt73usb_queue_rx = {
.entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = RXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt73usb_queue_tx = {
.entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt73usb_queue_bcn = {
.entry_num = 4,
.data_size = MGMT_FRAME_SIZE,
.desc_size = TXINFO_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct rt2x00_ops rt73usb_ops = {
.name = KBUILD_MODNAME,
.max_sta_intf = 1,
.max_ap_intf = 4,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
.tx_queues = NUM_TX_QUEUES,
.extra_tx_headroom = TXD_DESC_SIZE,
.rx = &rt73usb_queue_rx,
.tx = &rt73usb_queue_tx,
.bcn = &rt73usb_queue_bcn,
.lib = &rt73usb_rt2x00_ops,
.hw = &rt73usb_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
.debugfs = &rt73usb_rt2x00debug,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};
/*
* rt73usb module information.
*/
static struct usb_device_id rt73usb_device_table[] = {
/* AboCom */
{ USB_DEVICE(0x07b8, 0xb21b) },
{ USB_DEVICE(0x07b8, 0xb21c) },
{ USB_DEVICE(0x07b8, 0xb21d) },
{ USB_DEVICE(0x07b8, 0xb21e) },
{ USB_DEVICE(0x07b8, 0xb21f) },
/* AL */
{ USB_DEVICE(0x14b2, 0x3c10) },
/* Amigo */
{ USB_DEVICE(0x148f, 0x9021) },
{ USB_DEVICE(0x0eb0, 0x9021) },
/* AMIT */
{ USB_DEVICE(0x18c5, 0x0002) },
/* Askey */
{ USB_DEVICE(0x1690, 0x0722) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x1723) },
{ USB_DEVICE(0x0b05, 0x1724) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x705a) },
{ USB_DEVICE(0x050d, 0x905b) },
{ USB_DEVICE(0x050d, 0x905c) },
/* Billionton */
{ USB_DEVICE(0x1631, 0xc019) },
{ USB_DEVICE(0x08dd, 0x0120) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8) },
{ USB_DEVICE(0x0411, 0x00d9) },
{ USB_DEVICE(0x0411, 0x00f4) },
{ USB_DEVICE(0x0411, 0x0116) },
{ USB_DEVICE(0x0411, 0x0119) },
{ USB_DEVICE(0x0411, 0x0137) },
/* CEIVA */
{ USB_DEVICE(0x178d, 0x02be) },
/* CNet */
{ USB_DEVICE(0x1371, 0x9022) },
{ USB_DEVICE(0x1371, 0x9032) },
/* Conceptronic */
{ USB_DEVICE(0x14b2, 0x3c22) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002e) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c03) },
{ USB_DEVICE(0x07d1, 0x3c04) },
{ USB_DEVICE(0x07d1, 0x3c06) },
{ USB_DEVICE(0x07d1, 0x3c07) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7318) },
{ USB_DEVICE(0x7392, 0x7618) },
/* EnGenius */
{ USB_DEVICE(0x1740, 0x3701) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0004) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x8008) },
{ USB_DEVICE(0x1044, 0x800a) },
/* Huawei-3Com */
{ USB_DEVICE(0x1472, 0x0009) },
/* Hercules */
{ USB_DEVICE(0x06f8, 0xe002) },
{ USB_DEVICE(0x06f8, 0xe010) },
{ USB_DEVICE(0x06f8, 0xe020) },
/* Linksys */
{ USB_DEVICE(0x13b1, 0x0020) },
{ USB_DEVICE(0x13b1, 0x0023) },
{ USB_DEVICE(0x13b1, 0x0028) },
/* MSI */
{ USB_DEVICE(0x0db0, 0x4600) },
{ USB_DEVICE(0x0db0, 0x6877) },
{ USB_DEVICE(0x0db0, 0x6874) },
{ USB_DEVICE(0x0db0, 0xa861) },
{ USB_DEVICE(0x0db0, 0xa874) },
/* Ovislink */
{ USB_DEVICE(0x1b75, 0x7318) },
/* Ralink */
{ USB_DEVICE(0x04bb, 0x093d) },
{ USB_DEVICE(0x148f, 0x2573) },
{ USB_DEVICE(0x148f, 0x2671) },
{ USB_DEVICE(0x0812, 0x3101) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6196) },
{ USB_DEVICE(0x18e8, 0x6229) },
{ USB_DEVICE(0x18e8, 0x6238) },
/* Samsung */
{ USB_DEVICE(0x04e8, 0x4471) },
/* Senao */
{ USB_DEVICE(0x1740, 0x7100) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0024) },
{ USB_DEVICE(0x0df6, 0x0027) },
{ USB_DEVICE(0x0df6, 0x002f) },
{ USB_DEVICE(0x0df6, 0x90ac) },
{ USB_DEVICE(0x0df6, 0x9712) },
/* Surecom */
{ USB_DEVICE(0x0769, 0x31f3) },
/* Tilgin */
{ USB_DEVICE(0x6933, 0x5001) },
/* Philips */
{ USB_DEVICE(0x0471, 0x200a) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab01) },
{ USB_DEVICE(0x2019, 0xab50) },
/* WideTell */
{ USB_DEVICE(0x7167, 0x3840) },
/* Zcom */
{ USB_DEVICE(0x0cde, 0x001c) },
/* ZyXEL */
{ USB_DEVICE(0x0586, 0x3415) },
{ 0, }
};
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards");
MODULE_DEVICE_TABLE(usb, rt73usb_device_table);
MODULE_FIRMWARE(FIRMWARE_RT2571);
MODULE_LICENSE("GPL");
static int rt73usb_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
return rt2x00usb_probe(usb_intf, &rt73usb_ops);
}
static struct usb_driver rt73usb_driver = {
.name = KBUILD_MODNAME,
.id_table = rt73usb_device_table,
.probe = rt73usb_probe,
.disconnect = rt2x00usb_disconnect,
.suspend = rt2x00usb_suspend,
.resume = rt2x00usb_resume,
};
static int __init rt73usb_init(void)
{
return usb_register(&rt73usb_driver);
}
static void __exit rt73usb_exit(void)
{
usb_deregister(&rt73usb_driver);
}
module_init(rt73usb_init);
module_exit(rt73usb_exit);
| gpl-2.0 |
qwertyTom/android_kernel_cyanogen_msm8916 | fs/9p/vfs_inode_dotl.c | 1803 | 25862 | /*
* linux/fs/9p/vfs_inode_dotl.c
*
* This file contains vfs inode ops for the 9P2000.L protocol.
*
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"
#include "xattr.h"
#include "acl.h"
static int
v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dev_t rdev);
/**
* v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
* new file system object. This checks the S_ISGID to determine the owning
* group of the new file system object.
*/
static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
{
BUG_ON(dir_inode == NULL);
if (dir_inode->i_mode & S_ISGID) {
/* set_gid bit is set.*/
return dir_inode->i_gid;
}
return current_fsgid();
}
static int v9fs_test_inode_dotl(struct inode *inode, void *data)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_stat_dotl *st = (struct p9_stat_dotl *)data;
/* don't match inode of different type */
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
return 0;
if (inode->i_generation != st->st_gen)
return 0;
/* compare qid details */
if (memcmp(&v9inode->qid.version,
&st->qid.version, sizeof(v9inode->qid.version)))
return 0;
if (v9inode->qid.type != st->qid.type)
return 0;
return 1;
}
/* Always get a new inode */
static int v9fs_test_new_inode_dotl(struct inode *inode, void *data)
{
return 0;
}
static int v9fs_set_inode_dotl(struct inode *inode, void *data)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_stat_dotl *st = (struct p9_stat_dotl *)data;
memcpy(&v9inode->qid, &st->qid, sizeof(st->qid));
inode->i_generation = st->st_gen;
return 0;
}
static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
struct p9_qid *qid,
struct p9_fid *fid,
struct p9_stat_dotl *st,
int new)
{
int retval;
unsigned long i_ino;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
int (*test)(struct inode *, void *);
if (new)
test = v9fs_test_new_inode_dotl;
else
test = v9fs_test_inode_dotl;
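/*
* v9fs_test_new_inode_dotl never matches, so passing it to iget5_locked
* below forces allocation of a fresh inode instead of reusing a cached
* one with the same qid.
*/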
i_ino = v9fs_qid2ino(qid);
inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
/*
* initialize the inode with the stat info
* FIXME!! we may need support for stale inodes
* later.
*/
inode->i_ino = i_ino;
retval = v9fs_init_inode(v9ses, inode,
st->st_mode, new_decode_dev(st->st_rdev));
if (retval)
goto error;
v9fs_stat2inode_dotl(st, inode);
#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_get_cookie(inode);
#endif
retval = v9fs_get_acl(inode, fid);
if (retval)
goto error;
unlock_new_inode(inode);
return inode;
error:
unlock_new_inode(inode);
iput(inode);
return ERR_PTR(retval);
}
struct inode *
v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb, int new)
{
struct p9_stat_dotl *st;
struct inode *inode = NULL;
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC | P9_STATS_GEN);
if (IS_ERR(st))
return ERR_CAST(st);
inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new);
kfree(st);
return inode;
}
struct dotl_openflag_map {
int open_flag;
int dotl_flag;
};
static int v9fs_mapped_dotl_flags(int flags)
{
int i;
int rflags = 0;
struct dotl_openflag_map dotl_oflag_map[] = {
{ O_CREAT, P9_DOTL_CREATE },
{ O_EXCL, P9_DOTL_EXCL },
{ O_NOCTTY, P9_DOTL_NOCTTY },
{ O_APPEND, P9_DOTL_APPEND },
{ O_NONBLOCK, P9_DOTL_NONBLOCK },
{ O_DSYNC, P9_DOTL_DSYNC },
{ FASYNC, P9_DOTL_FASYNC },
{ O_DIRECT, P9_DOTL_DIRECT },
{ O_LARGEFILE, P9_DOTL_LARGEFILE },
{ O_DIRECTORY, P9_DOTL_DIRECTORY },
{ O_NOFOLLOW, P9_DOTL_NOFOLLOW },
{ O_NOATIME, P9_DOTL_NOATIME },
{ O_CLOEXEC, P9_DOTL_CLOEXEC },
{ O_SYNC, P9_DOTL_SYNC},
};
for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
if (flags & dotl_oflag_map[i].open_flag)
rflags |= dotl_oflag_map[i].dotl_flag;
}
return rflags;
}
/**
* v9fs_open_to_dotl_flags- convert Linux specific open flags to
* plan 9 open flag.
* @flags: flags to convert
*/
int v9fs_open_to_dotl_flags(int flags)
{
int rflags = 0;
/*
* We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
* and P9_DOTL_NOACCESS
*/
rflags |= flags & O_ACCMODE;
rflags |= v9fs_mapped_dotl_flags(flags);
return rflags;
}
/**
* v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
* @dir: directory inode in which the new file is created
* @dentry: dentry of the file being created
* @mode: create permissions
*
*/
static int
v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
bool excl)
{
return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
}
static int
v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t omode,
int *opened)
{
int err = 0;
kgid_t gid;
umode_t mode;
char *name = NULL;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *fid = NULL;
struct v9fs_inode *v9inode;
struct p9_fid *dfid, *ofid, *inode_fid;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
struct dentry *res = NULL;
if (d_unhashed(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
if (res)
dentry = res;
}
/* Only creates */
if (!(flags & O_CREAT))
return finish_no_open(file, res);
else if (dentry->d_inode) {
if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
return -EEXIST;
else
return finish_no_open(file, res);
}
v9ses = v9fs_inode2v9ses(dir);
name = (char *) dentry->d_name.name;
p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n",
name, flags, omode);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
goto out;
}
/* clone a fid to use for creation */
ofid = p9_client_walk(dfid, 0, NULL, 1);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
goto out;
}
gid = v9fs_get_fsgid_for_create(dir);
mode = omode;
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n",
err);
goto error;
}
err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
mode, gid, &qid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n",
err);
goto error;
}
v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
fid = NULL;
goto error;
}
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
goto error;
}
/* Now set the ACL based on the default value */
v9fs_set_create_acl(inode, fid, dacl, pacl);
v9fs_fid_add(dentry, fid);
d_instantiate(dentry, inode);
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if (v9ses->cache && !v9inode->writeback_fid &&
((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
mutex_unlock(&v9inode->v_mutex);
goto err_clunk_old_fid;
}
v9inode->writeback_fid = (void *) inode_fid;
}
mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
err = finish_open(file, dentry, generic_file_open, opened);
if (err)
goto err_clunk_old_fid;
file->private_data = ofid;
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
v9fs_cache_inode_set_cookie(inode, file);
#endif
*opened |= FILE_CREATED;
out:
v9fs_put_acl(dacl, pacl);
dput(res);
return err;
error:
if (fid)
p9_client_clunk(fid);
err_clunk_old_fid:
if (ofid)
p9_client_clunk(ofid);
goto out;
}
/**
* v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
* @dir: inode of the parent directory
* @dentry: dentry of the directory being created
* @mode: mode for new directory
*
*/
static int v9fs_vfs_mkdir_dotl(struct inode *dir,
struct dentry *dentry, umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
kgid_t gid;
char *name;
umode_t mode;
struct inode *inode;
struct p9_qid qid;
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
v9ses = v9fs_inode2v9ses(dir);
omode |= S_IFDIR;
if (dir->i_mode & S_ISGID)
omode |= S_ISGID;
dir_dentry = dentry->d_parent;
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
dfid = NULL;
goto error;
}
gid = v9fs_get_fsgid_for_create(dir);
mode = omode;
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mkdir %d\n",
err);
goto error;
}
name = (char *) dentry->d_name.name;
err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
if (err < 0)
goto error;
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
fid = NULL;
goto error;
}
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
err);
goto error;
}
v9fs_fid_add(dentry, fid);
v9fs_set_create_acl(inode, fid, dacl, pacl);
d_instantiate(dentry, inode);
fid = NULL;
err = 0;
} else {
/*
* Not in cached mode. No need to populate
* inode with stat. We need to get an inode
* so that we can set the acl with dentry
*/
inode = v9fs_get_inode(dir->i_sb, mode, 0);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
v9fs_set_create_acl(inode, fid, dacl, pacl);
d_instantiate(dentry, inode);
}
inc_nlink(dir);
v9fs_invalidate_inode_attr(dir);
error:
if (fid)
p9_client_clunk(fid);
v9fs_put_acl(dacl, pacl);
return err;
}
static int
v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
int err;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_stat_dotl *st;
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
/* Ask for all the fields in stat structure. Server will return
* whatever it supports
*/
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
if (IS_ERR(st))
return PTR_ERR(st);
v9fs_stat2inode_dotl(st, dentry->d_inode);
generic_fillattr(dentry->d_inode, stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
kfree(st);
return 0;
}
/*
* Attribute flags.
*/
#define P9_ATTR_MODE (1 << 0)
#define P9_ATTR_UID (1 << 1)
#define P9_ATTR_GID (1 << 2)
#define P9_ATTR_SIZE (1 << 3)
#define P9_ATTR_ATIME (1 << 4)
#define P9_ATTR_MTIME (1 << 5)
#define P9_ATTR_CTIME (1 << 6)
#define P9_ATTR_ATIME_SET (1 << 7)
#define P9_ATTR_MTIME_SET (1 << 8)
struct dotl_iattr_map {
int iattr_valid;
int p9_iattr_valid;
};
static int v9fs_mapped_iattr_valid(int iattr_valid)
{
int i;
int p9_iattr_valid = 0;
struct dotl_iattr_map dotl_iattr_map[] = {
{ ATTR_MODE, P9_ATTR_MODE },
{ ATTR_UID, P9_ATTR_UID },
{ ATTR_GID, P9_ATTR_GID },
{ ATTR_SIZE, P9_ATTR_SIZE },
{ ATTR_ATIME, P9_ATTR_ATIME },
{ ATTR_MTIME, P9_ATTR_MTIME },
{ ATTR_CTIME, P9_ATTR_CTIME },
{ ATTR_ATIME_SET, P9_ATTR_ATIME_SET },
{ ATTR_MTIME_SET, P9_ATTR_MTIME_SET },
};
for (i = 0; i < ARRAY_SIZE(dotl_iattr_map); i++) {
if (iattr_valid & dotl_iattr_map[i].iattr_valid)
p9_iattr_valid |= dotl_iattr_map[i].p9_iattr_valid;
}
return p9_iattr_valid;
}
/**
* v9fs_vfs_setattr_dotl - set file metadata
* @dentry: file whose metadata to set
* @iattr: metadata assignment structure
*
*/
int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
{
int retval;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct p9_iattr_dotl p9attr;
struct inode *inode = dentry->d_inode;
p9_debug(P9_DEBUG_VFS, "\n");
retval = inode_change_ok(inode, iattr);
if (retval)
return retval;
p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
p9attr.mode = iattr->ia_mode;
p9attr.uid = iattr->ia_uid;
p9attr.gid = iattr->ia_gid;
p9attr.size = iattr->ia_size;
p9attr.atime_sec = iattr->ia_atime.tv_sec;
p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
retval = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
/* Write all dirty data */
if (S_ISREG(inode->i_mode))
filemap_write_and_wait(inode->i_mapping);
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0)
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(inode))
truncate_setsize(inode, iattr->ia_size);
v9fs_invalidate_inode_attr(inode);
setattr_copy(inode, iattr);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE) {
/* We also want to update ACL when we update mode bits */
retval = v9fs_acl_chmod(inode, fid);
if (retval < 0)
return retval;
}
return 0;
}
/**
* v9fs_stat2inode_dotl - populate an inode structure with stat info
* @stat: stat structure
* @inode: inode to populate
*
*/
void
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
{
umode_t mode;
struct v9fs_inode *v9inode = V9FS_I(inode);
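/*
* When the server returned the complete basic stat set, copy every field
* unconditionally; otherwise honour st_result_mask and only update the
* fields the server actually filled in.
*/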
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
inode->i_atime.tv_sec = stat->st_atime_sec;
inode->i_atime.tv_nsec = stat->st_atime_nsec;
inode->i_mtime.tv_sec = stat->st_mtime_sec;
inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
inode->i_ctime.tv_sec = stat->st_ctime_sec;
inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
inode->i_uid = stat->st_uid;
inode->i_gid = stat->st_gid;
set_nlink(inode, stat->st_nlink);
mode = stat->st_mode & S_IALLUGO;
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
i_size_write(inode, stat->st_size);
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
inode->i_atime.tv_sec = stat->st_atime_sec;
inode->i_atime.tv_nsec = stat->st_atime_nsec;
}
if (stat->st_result_mask & P9_STATS_MTIME) {
inode->i_mtime.tv_sec = stat->st_mtime_sec;
inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
}
if (stat->st_result_mask & P9_STATS_CTIME) {
inode->i_ctime.tv_sec = stat->st_ctime_sec;
inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
}
if (stat->st_result_mask & P9_STATS_UID)
inode->i_uid = stat->st_uid;
if (stat->st_result_mask & P9_STATS_GID)
inode->i_gid = stat->st_gid;
if (stat->st_result_mask & P9_STATS_NLINK)
set_nlink(inode, stat->st_nlink);
if (stat->st_result_mask & P9_STATS_MODE) {
inode->i_mode = stat->st_mode;
if ((S_ISBLK(inode->i_mode)) ||
(S_ISCHR(inode->i_mode)))
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
}
if (stat->st_result_mask & P9_STATS_RDEV)
inode->i_rdev = new_decode_dev(stat->st_rdev);
if (stat->st_result_mask & P9_STATS_SIZE)
i_size_write(inode, stat->st_size);
if (stat->st_result_mask & P9_STATS_BLOCKS)
inode->i_blocks = stat->st_blocks;
}
if (stat->st_result_mask & P9_STATS_GEN)
inode->i_generation = stat->st_gen;
/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
* because the inode structure does not have fields for them.
*/
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
static int
v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
const char *symname)
{
int err;
kgid_t gid;
char *name;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *dfid;
struct p9_fid *fid = NULL;
struct v9fs_session_info *v9ses;
name = (char *) dentry->d_name.name;
p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname);
v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
return err;
}
gid = v9fs_get_fsgid_for_create(dir);
/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
goto error;
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache) {
/* Now walk from the parent so we can get an unopened fid. */
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
fid = NULL;
goto error;
}
/* instantiate inode and assign the unopened fid to dentry */
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
err);
goto error;
}
v9fs_fid_add(dentry, fid);
d_instantiate(dentry, inode);
fid = NULL;
err = 0;
} else {
/* Not in cached mode. No need to populate inode with stat */
inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
d_instantiate(dentry, inode);
}
error:
if (fid)
p9_client_clunk(fid);
return err;
}
/**
* v9fs_vfs_link_dotl - create a hardlink for dotl
* @old_dentry: dentry for file to link to
* @dir: inode destination for new link
* @dentry: dentry for link
*
*/
static int
v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int err;
char *name;
struct dentry *dir_dentry;
struct p9_fid *dfid, *oldfid;
struct v9fs_session_info *v9ses;
p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
v9ses = v9fs_inode2v9ses(dir);
dir_dentry = dentry->d_parent;
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid))
return PTR_ERR(dfid);
oldfid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(oldfid))
return PTR_ERR(oldfid);
name = (char *) dentry->d_name.name;
err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
if (err < 0) {
p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
return err;
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
/* Get the latest stat info from server. */
struct p9_fid *fid;
fid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
v9fs_refresh_inode_dotl(fid, old_dentry->d_inode);
}
ihold(old_dentry->d_inode);
d_instantiate(dentry, old_dentry->d_inode);
return err;
}
/**
* v9fs_vfs_mknod_dotl - create a special file
* @dir: directory inode in which the special file is created
* @dentry: dentry for file
* @mode: mode for creation
* @rdev: device associated with special file
*
*/
static int
v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
dev_t rdev)
{
int err;
kgid_t gid;
char *name;
umode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
struct p9_qid qid;
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n",
dir->i_ino, dentry->d_name.name, omode,
MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
return -EINVAL;
v9ses = v9fs_inode2v9ses(dir);
dir_dentry = dentry->d_parent;
dfid = v9fs_fid_lookup(dir_dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
dfid = NULL;
goto error;
}
gid = v9fs_get_fsgid_for_create(dir);
mode = omode;
/* Update mode based on ACL value */
err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
if (err) {
p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mknod %d\n",
err);
goto error;
}
name = (char *) dentry->d_name.name;
err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
if (err < 0)
goto error;
v9fs_invalidate_inode_attr(dir);
fid = p9_client_walk(dfid, 1, &name, 1);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
err);
fid = NULL;
goto error;
}
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
err);
goto error;
}
v9fs_set_create_acl(inode, fid, dacl, pacl);
v9fs_fid_add(dentry, fid);
d_instantiate(dentry, inode);
fid = NULL;
err = 0;
} else {
/*
* Not in cached mode. No need to populate inode with stat.
* socket syscall returns a fd, so we need instantiate
*/
inode = v9fs_get_inode(dir->i_sb, mode, rdev);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
v9fs_set_create_acl(inode, fid, dacl, pacl);
d_instantiate(dentry, inode);
}
error:
if (fid)
p9_client_clunk(fid);
v9fs_put_acl(dacl, pacl);
return err;
}
/**
* v9fs_vfs_follow_link_dotl - follow a symlink path
* @dentry: dentry for symlink
* @nd: nameidata
*
*/
static void *
v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
{
int retval;
struct p9_fid *fid;
char *link = __getname();
char *target;
p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
if (!link) {
link = ERR_PTR(-ENOMEM);
goto ndset;
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid)) {
__putname(link);
link = ERR_CAST(fid);
goto ndset;
}
retval = p9_client_readlink(fid, &target);
if (!retval) {
strcpy(link, target);
kfree(target);
goto ndset;
}
__putname(link);
link = ERR_PTR(retval);
ndset:
nd_set_link(nd, link);
return NULL;
}
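/*
* Note on buffer ownership (descriptive only, no new behaviour): the
* buffer obtained from __getname() above is handed to the VFS via
* nd_set_link() and is released later by the matching .put_link
* handler (v9fs_vfs_put_link in the symlink inode_operations below),
* not by this function.
*/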
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
{
loff_t i_size;
struct p9_stat_dotl *st;
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
if (IS_ERR(st))
return PTR_ERR(st);
/*
* Don't update inode if the file type is different
*/
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
goto out;
spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
i_size = inode->i_size;
v9fs_stat2inode_dotl(st, inode);
if (v9ses->cache)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
out:
kfree(st);
return 0;
}
const struct inode_operations v9fs_dir_inode_operations_dotl = {
.create = v9fs_vfs_create_dotl,
.atomic_open = v9fs_vfs_atomic_open_dotl,
.lookup = v9fs_vfs_lookup,
.link = v9fs_vfs_link_dotl,
.symlink = v9fs_vfs_symlink_dotl,
.unlink = v9fs_vfs_unlink,
.mkdir = v9fs_vfs_mkdir_dotl,
.rmdir = v9fs_vfs_rmdir,
.mknod = v9fs_vfs_mknod_dotl,
.rename = v9fs_vfs_rename,
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
.get_acl = v9fs_iop_get_acl,
};
const struct inode_operations v9fs_file_inode_operations_dotl = {
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
.get_acl = v9fs_iop_get_acl,
};
const struct inode_operations v9fs_symlink_inode_operations_dotl = {
.readlink = generic_readlink,
.follow_link = v9fs_vfs_follow_link_dotl,
.put_link = v9fs_vfs_put_link,
.getattr = v9fs_vfs_getattr_dotl,
.setattr = v9fs_vfs_setattr_dotl,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = v9fs_listxattr,
};
| gpl-2.0 |
bangprovn/caf-kernel | fs/proc/array.c | 3083 | 15063 | /*
* linux/fs/proc/array.c
*
* Copyright (C) 1992 by Linus Torvalds
* based on ideas by Darren Senn
*
* Fixes:
* Michael. K. Johnson: stat,statm extensions.
* <johnsonm@stolaf.edu>
*
* Pauline Middelink : Made cmdline,envline only break at '\0's, to
* make sure SET_PROCTITLE works. Also removed
* bad '!' which forced address recalculation for
* EVERY character on the current page.
* <middelin@polyware.iaf.nl>
*
* Danny ter Haar : added cpuinfo
* <dth@cistron.nl>
*
* Alessandro Rubini : profile extension.
* <rubini@ipvvis.unipv.it>
*
* Jeff Tranter : added BogoMips field to cpuinfo
* <Jeff_Tranter@Mitel.COM>
*
* Bruno Haible : remove 4K limit for the maps file
* <haible@ma2s2.mathematik.uni-karlsruhe.de>
*
* Yves Arrouye : remove removal of trailing spaces in get_array.
* <Yves.Arrouye@marin.fdn.fr>
*
* Jerome Forissier : added per-CPU time information to /proc/stat
* and /proc/<pid>/cpu extension
* <forissier@isia.cma.fr>
* - Incorporation and non-SMP safe operation
* of forissier patch in 2.1.78 by
* Hans Marcus <crowbar@concepts.nl>
*
* aeb@cwi.nl : /proc/partitions
*
*
* Alan Cox : security fixes.
* <alan@lxorguk.ukuu.org.uk>
*
* Al Viro : safe handling of mm_struct
*
* Gerhard Wichert : added BIGMEM support
* Siemens AG <Gerhard.Wichert@pdb.siemens.de>
*
* Al Viro & Jeff Garzik : moved most of the thing into base.c and
* : proc_misc.c. The rest may eventually go into
* : base.c too.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/times.h>
#include <linux/cpuset.h>
#include <linux/rcupdate.h>
#include <linux/delayacct.h>
#include <linux/seq_file.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include "internal.h"
static inline void task_name(struct seq_file *m, struct task_struct *p)
{
int i;
char *buf, *end;
char *name;
char tcomm[sizeof(p->comm)];
get_task_comm(tcomm, p);
seq_puts(m, "Name:\t");
end = m->buf + m->size;
buf = m->buf + m->count;
name = tcomm;
i = sizeof(tcomm);
while (i && (buf < end)) {
unsigned char c = *name;
name++;
i--;
*buf = c;
if (!c)
break;
if (c == '\\') {
buf++;
if (buf < end)
*buf++ = c;
continue;
}
if (c == '\n') {
*buf++ = '\\';
if (buf < end)
*buf++ = 'n';
continue;
}
buf++;
}
m->count = buf - m->buf;
seq_putc(m, '\n');
}
/*
* The task state array is a strange "bitmap" of
* reasons to sleep. Thus "running" is zero, and
* you can test for combinations of others with
* simple bit tests.
*/
static const char * const task_state_array[] = {
"R (running)", /* 0 */
"S (sleeping)", /* 1 */
"D (disk sleep)", /* 2 */
"T (stopped)", /* 4 */
"t (tracing stop)", /* 8 */
"Z (zombie)", /* 16 */
"X (dead)", /* 32 */
"x (dead)", /* 64 */
"K (wakekill)", /* 128 */
"W (waking)", /* 256 */
};
static inline const char *get_task_state(struct task_struct *tsk)
{
unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
const char * const *p = &task_state_array[0];
BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
while (state) {
p++;
state >>= 1;
}
return *p;
}
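/*
* Worked example of the lookup above (assuming the usual TASK_*
* encoding: TASK_RUNNING = 0, TASK_INTERRUPTIBLE = 1,
* TASK_UNINTERRUPTIBLE = 2, __TASK_STOPPED = 4, ...):
*
*   state == 0  ->  no shifts     ->  "R (running)"
*   state == 2  ->  two shifts    ->  "D (disk sleep)"
*   state == 4  ->  three shifts  ->  "T (stopped)"
*
* i.e. the selected index is effectively fls(state).
*/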
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *p)
{
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
const struct cred *cred;
pid_t ppid, tpid;
rcu_read_lock();
ppid = pid_alive(p) ?
task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
tpid = 0;
if (pid_alive(p)) {
struct task_struct *tracer = ptrace_parent(p);
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
cred = get_task_cred(p);
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
"Pid:\t%d\n"
"PPid:\t%d\n"
"TracerPid:\t%d\n"
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
task_tgid_nr_ns(p, ns),
pid_nr_ns(pid, ns),
ppid, tpid,
cred->uid, cred->euid, cred->suid, cred->fsuid,
cred->gid, cred->egid, cred->sgid, cred->fsgid);
task_lock(p);
if (p->files)
fdt = files_fdtable(p->files);
seq_printf(m,
"FDSize:\t%d\n"
"Groups:\t",
fdt ? fdt->max_fds : 0);
rcu_read_unlock();
group_info = cred->group_info;
task_unlock(p);
for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
seq_printf(m, "%d ", GROUP_AT(group_info, g));
put_cred(cred);
seq_putc(m, '\n');
}
static void render_sigset_t(struct seq_file *m, const char *header,
sigset_t *set)
{
int i;
seq_puts(m, header);
i = _NSIG;
do {
int x = 0;
i -= 4;
if (sigismember(set, i+1)) x |= 1;
if (sigismember(set, i+2)) x |= 2;
if (sigismember(set, i+3)) x |= 4;
if (sigismember(set, i+4)) x |= 8;
seq_printf(m, "%x", x);
} while (i >= 4);
seq_putc(m, '\n');
}
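/*
* Example output of render_sigset_t() (assuming the usual 64-signal
* layout, _NSIG == 64): a set containing only SIGINT (2) and
* SIGTERM (15) is rendered as the 16-nibble string
* "0000000000004002" -- each hex digit covers four signal numbers,
* highest-numbered signals first.
*/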
static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
sigset_t *catch)
{
struct k_sigaction *k;
int i;
k = p->sighand->action;
for (i = 1; i <= _NSIG; ++i, ++k) {
if (k->sa.sa_handler == SIG_IGN)
sigaddset(ign, i);
else if (k->sa.sa_handler != SIG_DFL)
sigaddset(catch, i);
}
}
static inline void task_sig(struct seq_file *m, struct task_struct *p)
{
unsigned long flags;
sigset_t pending, shpending, blocked, ignored, caught;
int num_threads = 0;
unsigned long qsize = 0;
unsigned long qlim = 0;
sigemptyset(&pending);
sigemptyset(&shpending);
sigemptyset(&blocked);
sigemptyset(&ignored);
sigemptyset(&caught);
if (lock_task_sighand(p, &flags)) {
pending = p->pending.signal;
shpending = p->signal->shared_pending.signal;
blocked = p->blocked;
collect_sigign_sigcatch(p, &ignored, &caught);
num_threads = get_nr_threads(p);
rcu_read_lock(); /* FIXME: is this correct? */
qsize = atomic_read(&__task_cred(p)->user->sigpending);
rcu_read_unlock();
qlim = task_rlimit(p, RLIMIT_SIGPENDING);
unlock_task_sighand(p, &flags);
}
seq_printf(m, "Threads:\t%d\n", num_threads);
seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);
/* render them all */
render_sigset_t(m, "SigPnd:\t", &pending);
render_sigset_t(m, "ShdPnd:\t", &shpending);
render_sigset_t(m, "SigBlk:\t", &blocked);
render_sigset_t(m, "SigIgn:\t", &ignored);
render_sigset_t(m, "SigCgt:\t", &caught);
}
static void render_cap_t(struct seq_file *m, const char *header,
kernel_cap_t *a)
{
unsigned __capi;
seq_puts(m, header);
CAP_FOR_EACH_U32(__capi) {
seq_printf(m, "%08x",
a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
}
seq_putc(m, '\n');
}
static inline void task_cap(struct seq_file *m, struct task_struct *p)
{
const struct cred *cred;
kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset;
rcu_read_lock();
cred = __task_cred(p);
cap_inheritable = cred->cap_inheritable;
cap_permitted = cred->cap_permitted;
cap_effective = cred->cap_effective;
cap_bset = cred->cap_bset;
rcu_read_unlock();
render_cap_t(m, "CapInh:\t", &cap_inheritable);
render_cap_t(m, "CapPrm:\t", &cap_permitted);
render_cap_t(m, "CapEff:\t", &cap_effective);
render_cap_t(m, "CapBnd:\t", &cap_bset);
}
static inline void task_context_switch_counts(struct seq_file *m,
struct task_struct *p)
{
seq_printf(m, "voluntary_ctxt_switches:\t%lu\n"
"nonvoluntary_ctxt_switches:\t%lu\n",
p->nvcsw,
p->nivcsw);
}
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_puts(m, "Cpus_allowed:\t");
seq_cpumask(m, &task->cpus_allowed);
seq_putc(m, '\n');
seq_puts(m, "Cpus_allowed_list:\t");
seq_cpumask_list(m, &task->cpus_allowed);
seq_putc(m, '\n');
}
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
struct mm_struct *mm = get_task_mm(task);
task_name(m, task);
task_state(m, ns, pid, task);
if (mm) {
task_mem(m, mm);
mmput(mm);
}
task_sig(m, task);
task_cap(m, task);
task_cpus_allowed(m, task);
cpuset_task_status_allowed(m, task);
task_context_switch_counts(m, task);
return 0;
}
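/*
* Resulting /proc/<pid>/status layout, in the order produced above:
* Name; State/Tgid/Pid/PPid/TracerPid/Uid/Gid/FDSize/Groups; the Vm*
* lines from task_mem() (only if the task still has an mm); the
* Threads/SigQ/Sig* lines; the Cap* lines; Cpus_allowed(_list); the
* cpuset Mems_allowed lines; and the context switch counters.
*/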
static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task, int whole)
{
unsigned long vsize, eip, esp, wchan = ~0UL;
long priority, nice;
int tty_pgrp = -1, tty_nr = 0;
sigset_t sigign, sigcatch;
char state;
pid_t ppid = 0, pgid = -1, sid = -1;
int num_threads = 0;
int permitted;
struct mm_struct *mm;
unsigned long long start_time;
unsigned long cmin_flt = 0, cmaj_flt = 0;
unsigned long min_flt = 0, maj_flt = 0;
cputime_t cutime, cstime, utime, stime;
cputime_t cgtime, gtime;
unsigned long rsslim = 0;
char tcomm[sizeof(task->comm)];
unsigned long flags;
state = *get_task_state(task);
vsize = eip = esp = 0;
permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
mm = get_task_mm(task);
if (mm) {
vsize = task_vsize(mm);
if (permitted) {
eip = KSTK_EIP(task);
esp = KSTK_ESP(task);
}
}
get_task_comm(tcomm, task);
sigemptyset(&sigign);
sigemptyset(&sigcatch);
cutime = cstime = utime = stime = 0;
cgtime = gtime = 0;
if (lock_task_sighand(task, &flags)) {
struct signal_struct *sig = task->signal;
if (sig->tty) {
struct pid *pgrp = tty_get_pgrp(sig->tty);
tty_pgrp = pid_nr_ns(pgrp, ns);
put_pid(pgrp);
tty_nr = new_encode_dev(tty_devnum(sig->tty));
}
num_threads = get_nr_threads(task);
collect_sigign_sigcatch(task, &sigign, &sigcatch);
cmin_flt = sig->cmin_flt;
cmaj_flt = sig->cmaj_flt;
cutime = sig->cutime;
cstime = sig->cstime;
cgtime = sig->cgtime;
rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
/* add up live thread stats at the group level */
if (whole) {
struct task_struct *t = task;
do {
min_flt += t->min_flt;
maj_flt += t->maj_flt;
gtime += t->gtime;
t = next_thread(t);
} while (t != task);
min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
thread_group_times(task, &utime, &stime);
gtime += sig->gtime;
}
sid = task_session_nr_ns(task, ns);
ppid = task_tgid_nr_ns(task->real_parent, ns);
pgid = task_pgrp_nr_ns(task, ns);
unlock_task_sighand(task, &flags);
}
if (permitted && (!whole || num_threads < 2))
wchan = get_wchan(task);
if (!whole) {
min_flt = task->min_flt;
maj_flt = task->maj_flt;
task_times(task, &utime, &stime);
gtime = task->gtime;
}
/* scale priority and nice values from timeslices to -20..20 */
/* to make it look like a "normal" Unix priority/nice value */
priority = task_prio(task);
nice = task_nice(task);
/* Temporary variable needed for gcc-2.96 */
/* convert timespec -> nsec */
start_time =
(unsigned long long)task->real_start_time.tv_sec * NSEC_PER_SEC
+ task->real_start_time.tv_nsec;
/* convert nsec -> ticks */
start_time = nsec_to_clock_t(start_time);
seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
seq_put_decimal_ll(m, ' ', ppid);
seq_put_decimal_ll(m, ' ', pgid);
seq_put_decimal_ll(m, ' ', sid);
seq_put_decimal_ll(m, ' ', tty_nr);
seq_put_decimal_ll(m, ' ', tty_pgrp);
seq_put_decimal_ull(m, ' ', task->flags);
seq_put_decimal_ull(m, ' ', min_flt);
seq_put_decimal_ull(m, ' ', cmin_flt);
seq_put_decimal_ull(m, ' ', maj_flt);
seq_put_decimal_ull(m, ' ', cmaj_flt);
seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime));
seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime));
seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime));
seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime));
seq_put_decimal_ll(m, ' ', priority);
seq_put_decimal_ll(m, ' ', nice);
seq_put_decimal_ll(m, ' ', num_threads);
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ull(m, ' ', start_time);
seq_put_decimal_ull(m, ' ', vsize);
seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
seq_put_decimal_ull(m, ' ', rsslim);
seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
seq_put_decimal_ull(m, ' ', esp);
seq_put_decimal_ull(m, ' ', eip);
/* The signal information here is obsolete.
* It must be decimal for Linux 2.0 compatibility.
* Use /proc/#/status for real-time signals.
*/
seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL);
seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
seq_put_decimal_ull(m, ' ', wchan);
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ll(m, ' ', task->exit_signal);
seq_put_decimal_ll(m, ' ', task_cpu(task));
seq_put_decimal_ull(m, ' ', task->rt_priority);
seq_put_decimal_ull(m, ' ', task->policy);
seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
seq_putc(m, '\n');
if (mm)
mmput(mm);
return 0;
}
int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
return do_task_stat(m, ns, pid, task, 0);
}
int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
return do_task_stat(m, ns, pid, task, 1);
}
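/*
* proc_tid_stat() backs /proc/<pid>/task/<tid>/stat and reports
* per-thread counters (whole == 0); proc_tgid_stat() backs
* /proc/<pid>/stat and, with whole == 1, sums fault counts and guest
* time over all live threads plus the already-reaped ones accumulated
* in signal_struct.
*/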
int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
struct mm_struct *mm = get_task_mm(task);
if (mm) {
size = task_statm(mm, &shared, &text, &data, &resident);
mmput(mm);
}
/*
* For a quick read, open-code the output by emitting the numbers
* directly. The expected format is equivalent to:
* seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
* size, resident, shared, text, data);
*/
seq_put_decimal_ull(m, 0, size);
seq_put_decimal_ull(m, ' ', resident);
seq_put_decimal_ull(m, ' ', shared);
seq_put_decimal_ull(m, ' ', text);
seq_put_decimal_ull(m, ' ', 0);
seq_put_decimal_ull(m, ' ', data);
seq_put_decimal_ull(m, ' ', 0);
seq_putc(m, '\n');
return 0;
}
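/*
* A minimal user-space sketch (guarded out, not part of this file)
* showing how the seven space-separated page counts emitted above can
* be consumed; field names follow proc(5): size resident shared text
* lib data dt, where lib and dt are always 0 here.
*/
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long size, resident, shared, text, lib, data, dt;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
		   &size, &resident, &shared, &text, &lib, &data, &dt) == 7)
		printf("resident: %lu pages\n", resident);
	fclose(f);
	return 0;
}
#endif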
| gpl-2.0 |
Hot2-Kernel/Mystery-Kernel | net/netfilter/nf_conntrack_proto_udplite.c | 3083 | 11110 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2007 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/udp.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_log.h>
enum udplite_conntrack {
UDPLITE_CT_UNREPLIED,
UDPLITE_CT_REPLIED,
UDPLITE_CT_MAX
};
static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
[UDPLITE_CT_UNREPLIED] = 30*HZ,
[UDPLITE_CT_REPLIED] = 180*HZ,
};
static int udplite_net_id __read_mostly;
struct udplite_net {
struct nf_proto_net pn;
unsigned int timeouts[UDPLITE_CT_MAX];
};
static inline struct udplite_net *udplite_pernet(struct net *net)
{
return net_generic(net, udplite_net_id);
}
static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
const struct udphdr *hp;
struct udphdr _hdr;
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return false;
tuple->src.u.udp.port = hp->source;
tuple->dst.u.udp.port = hp->dest;
return true;
}
static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
tuple->src.u.udp.port = orig->dst.u.udp.port;
tuple->dst.u.udp.port = orig->src.u.udp.port;
return true;
}
/* Print out the per-protocol part of the tuple. */
static int udplite_print_tuple(struct seq_file *s,
const struct nf_conntrack_tuple *tuple)
{
return seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.udp.port),
ntohs(tuple->dst.u.udp.port));
}
static unsigned int *udplite_get_timeouts(struct net *net)
{
return udplite_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntracktype */
static int udplite_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum,
unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDPLITE_CT_REPLIED]);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDPLITE_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
static int udplite_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
u_int8_t pf,
unsigned int hooknum)
{
unsigned int udplen = skb->len - dataoff;
const struct udphdr *hdr;
struct udphdr _hdr;
unsigned int cscov;
/* Header is too small? */
hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hdr == NULL) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: short packet ");
return -NF_ACCEPT;
}
cscov = ntohs(hdr->len);
if (cscov == 0)
cscov = udplen;
else if (cscov < sizeof(*hdr) || cscov > udplen) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: invalid checksum coverage ");
return -NF_ACCEPT;
}
/* UDPLITE mandates checksums */
if (!hdr->check) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: checksum missing ");
return -NF_ACCEPT;
}
/* Checksum invalid? Ignore. */
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
pf)) {
if (LOG_INVALID(net, IPPROTO_UDPLITE))
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_udplite: bad UDPLite checksum ");
return -NF_ACCEPT;
}
return NF_ACCEPT;
}
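/*
* Checksum coverage rule applied above (RFC 3828): a coverage of 0
* means the checksum covers the whole datagram; otherwise it must
* cover at least the 8-byte UDP-Lite header and may not exceed the
* datagram length. E.g. udplen == 100 and hdr->len == 20 is valid and
* means only the first 20 bytes are checksummed.
*
* A stand-alone sketch of the same rule (guarded out, illustrative
* only; values assumed to be in host byte order):
*/
#if 0
static bool udplite_cscov_ok(unsigned int cscov, unsigned int udplen)
{
	if (cscov == 0)		/* 0 == checksum covers everything */
		return true;
	/* must cover at least the 8-byte header, at most the datagram */
	return cscov >= 8 && cscov <= udplen;
}
#endif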
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
struct udplite_net *un = udplite_pernet(net);
/* set default timeouts for UDPlite. */
timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED];
timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
timeouts[UDPLITE_CT_UNREPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
}
if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
timeouts[UDPLITE_CT_REPLIED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
}
return 0;
}
static int
udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy
udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
[CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table udplite_sysctl_table[] = {
{
.procname = "nf_conntrack_udplite_timeout",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udplite_timeout_stream",
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ }
};
#endif /* CONFIG_SYSCTL */
static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn,
struct udplite_net *un)
{
#ifdef CONFIG_SYSCTL
if (pn->ctl_table)
return 0;
pn->ctl_table = kmemdup(udplite_sysctl_table,
sizeof(udplite_sysctl_table),
GFP_KERNEL);
if (!pn->ctl_table)
return -ENOMEM;
pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED];
pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED];
#endif
return 0;
}
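/*
* Usage example (assuming the usual net/netfilter sysctl mount point):
* the two entries above appear as
*   /proc/sys/net/netfilter/nf_conntrack_udplite_timeout        (unreplied)
*   /proc/sys/net/netfilter/nf_conntrack_udplite_timeout_stream (replied)
* and, because of proc_dointvec_jiffies, are read and written in
* seconds while being stored internally in jiffies, e.g.
*   echo 60 > /proc/sys/net/netfilter/nf_conntrack_udplite_timeout
*/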
static int udplite_init_net(struct net *net, u_int16_t proto)
{
struct udplite_net *un = udplite_pernet(net);
struct nf_proto_net *pn = &un->pn;
if (!pn->users) {
int i;
for (i = 0 ; i < UDPLITE_CT_MAX; i++)
un->timeouts[i] = udplite_timeouts[i];
}
return udplite_kmemdup_sysctl_table(pn, un);
}
static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
{
.l3proto = PF_INET,
.l4proto = IPPROTO_UDPLITE,
.name = "udplite",
.pkt_to_tuple = udplite_pkt_to_tuple,
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
.get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = udplite_timeout_nlattr_to_obj,
.obj_to_nlattr = udplite_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
.obj_size = sizeof(unsigned int) *
CTA_TIMEOUT_UDPLITE_MAX,
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
.net_id = &udplite_net_id,
.init_net = udplite_init_net,
};
static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
{
.l3proto = PF_INET6,
.l4proto = IPPROTO_UDPLITE,
.name = "udplite",
.pkt_to_tuple = udplite_pkt_to_tuple,
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
.get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
.ctnl_timeout = {
.nlattr_to_obj = udplite_timeout_nlattr_to_obj,
.obj_to_nlattr = udplite_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
.obj_size = sizeof(unsigned int) *
CTA_TIMEOUT_UDPLITE_MAX,
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
.net_id = &udplite_net_id,
.init_net = udplite_init_net,
};
static int udplite_net_init(struct net *net)
{
int ret = 0;
ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udplite4);
if (ret < 0) {
pr_err("nf_conntrack_udplite4: pernet registration failed.\n");
goto out;
}
ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_udplite6);
if (ret < 0) {
pr_err("nf_conntrack_udplite6: pernet registration failed.\n");
goto cleanup_udplite4;
}
return 0;
cleanup_udplite4:
nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite4);
out:
return ret;
}
static void udplite_net_exit(struct net *net)
{
nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite6);
nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_udplite4);
}
static struct pernet_operations udplite_net_ops = {
.init = udplite_net_init,
.exit = udplite_net_exit,
.id = &udplite_net_id,
.size = sizeof(struct udplite_net),
};
static int __init nf_conntrack_proto_udplite_init(void)
{
int ret;
ret = register_pernet_subsys(&udplite_net_ops);
if (ret < 0)
goto out_pernet;
ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
if (ret < 0)
goto out_udplite4;
ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite6);
if (ret < 0)
goto out_udplite6;
return 0;
out_udplite6:
nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
out_udplite4:
unregister_pernet_subsys(&udplite_net_ops);
out_pernet:
return ret;
}
static void __exit nf_conntrack_proto_udplite_exit(void)
{
nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
unregister_pernet_subsys(&udplite_net_ops);
}
module_init(nf_conntrack_proto_udplite_init);
module_exit(nf_conntrack_proto_udplite_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
NieNs/IM-A840S-kernel-1 | drivers/hwmon/w83793.c | 3083 | 62075 | /*
w83793.c - Linux kernel driver for hardware monitoring
Copyright (C) 2006 Winbond Electronics Corp.
Yuan Mu
Rudolf Marek <r.marek@assembler.cz>
Copyright (C) 2009-2010 Sven Anders <anders@anduras.de>, ANDURAS AG.
Watchdog driver part
(Based partially on fschmd driver,
Copyright 2007-2008 by Hans de Goede)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation - version 2.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
*/
/*
Supports the following chips:
Chip    #vin    #fanin  #pwm    #temp   wchipid vendid  i2c     ISA
w83793  10      12      8       6       0x7b    0x5ca3  yes     no
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kref.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
/* Default values */
#define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
static int reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
static int timeout = WATCHDOG_TIMEOUT; /* default timeout in minutes */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in minutes. 2<= timeout <=255 (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
Address 0x00, 0x0d, 0x0e, 0x0f in all three banks are reserved
as ID, Bank Select registers
*/
#define W83793_REG_BANKSEL 0x00
#define W83793_REG_VENDORID 0x0d
#define W83793_REG_CHIPID 0x0e
#define W83793_REG_DEVICEID 0x0f
#define W83793_REG_CONFIG 0x40
#define W83793_REG_MFC 0x58
#define W83793_REG_FANIN_CTRL 0x5c
#define W83793_REG_FANIN_SEL 0x5d
#define W83793_REG_I2C_ADDR 0x0b
#define W83793_REG_I2C_SUBADDR 0x0c
#define W83793_REG_VID_INA 0x05
#define W83793_REG_VID_INB 0x06
#define W83793_REG_VID_LATCHA 0x07
#define W83793_REG_VID_LATCHB 0x08
#define W83793_REG_VID_CTRL 0x59
#define W83793_REG_WDT_LOCK 0x01
#define W83793_REG_WDT_ENABLE 0x02
#define W83793_REG_WDT_STATUS 0x03
#define W83793_REG_WDT_TIMEOUT 0x04
static u16 W83793_REG_TEMP_MODE[2] = { 0x5e, 0x5f };
#define TEMP_READ 0
#define TEMP_CRIT 1
#define TEMP_CRIT_HYST 2
#define TEMP_WARN 3
#define TEMP_WARN_HYST 4
/* only crit and crit_hyst affect real-time alarm status
current crit crit_hyst warn warn_hyst */
static u16 W83793_REG_TEMP[][5] = {
{0x1c, 0x78, 0x79, 0x7a, 0x7b},
{0x1d, 0x7c, 0x7d, 0x7e, 0x7f},
{0x1e, 0x80, 0x81, 0x82, 0x83},
{0x1f, 0x84, 0x85, 0x86, 0x87},
{0x20, 0x88, 0x89, 0x8a, 0x8b},
{0x21, 0x8c, 0x8d, 0x8e, 0x8f},
};
#define W83793_REG_TEMP_LOW_BITS 0x22
#define W83793_REG_BEEP(index) (0x53 + (index))
#define W83793_REG_ALARM(index) (0x4b + (index))
#define W83793_REG_CLR_CHASSIS 0x4a /* SMI MASK4 */
#define W83793_REG_IRQ_CTRL 0x50
#define W83793_REG_OVT_CTRL 0x51
#define W83793_REG_OVT_BEEP 0x52
#define IN_READ 0
#define IN_MAX 1
#define IN_LOW 2
static const u16 W83793_REG_IN[][3] = {
/* Current, High, Low */
{0x10, 0x60, 0x61}, /* Vcore A */
{0x11, 0x62, 0x63}, /* Vcore B */
{0x12, 0x64, 0x65}, /* Vtt */
{0x14, 0x6a, 0x6b}, /* VSEN1 */
{0x15, 0x6c, 0x6d}, /* VSEN2 */
{0x16, 0x6e, 0x6f}, /* +3VSEN */
{0x17, 0x70, 0x71}, /* +12VSEN */
{0x18, 0x72, 0x73}, /* 5VDD */
{0x19, 0x74, 0x75}, /* 5VSB */
{0x1a, 0x76, 0x77}, /* VBAT */
};
/* Low Bits of Vcore A/B Vtt Read/High/Low */
static const u16 W83793_REG_IN_LOW_BITS[] = { 0x1b, 0x68, 0x69 };
static u8 scale_in[] = { 2, 2, 2, 16, 16, 16, 8, 24, 24, 16 };
static u8 scale_in_add[] = { 0, 0, 0, 0, 0, 0, 0, 150, 150, 0 };
#define W83793_REG_FAN(index) (0x23 + 2 * (index)) /* High byte */
#define W83793_REG_FAN_MIN(index) (0x90 + 2 * (index)) /* High byte */
#define W83793_REG_PWM_DEFAULT 0xb2
#define W83793_REG_PWM_ENABLE 0x207
#define W83793_REG_PWM_UPTIME 0xc3 /* Unit in 0.1 second */
#define W83793_REG_PWM_DOWNTIME 0xc4 /* Unit in 0.1 second */
#define W83793_REG_TEMP_CRITICAL 0xc5
#define PWM_DUTY 0
#define PWM_START 1
#define PWM_NONSTOP 2
#define PWM_STOP_TIME 3
#define W83793_REG_PWM(index, nr) (((nr) == 0 ? 0xb3 : \
(nr) == 1 ? 0x220 : 0x218) + (index))
/* bit field, fan1 is bit0, fan2 is bit1 ... */
#define W83793_REG_TEMP_FAN_MAP(index) (0x201 + (index))
#define W83793_REG_TEMP_TOL(index) (0x208 + (index))
#define W83793_REG_TEMP_CRUISE(index) (0x210 + (index))
#define W83793_REG_PWM_STOP_TIME(index) (0x228 + (index))
#define W83793_REG_SF2_TEMP(index, nr) (0x230 + ((index) << 4) + (nr))
#define W83793_REG_SF2_PWM(index, nr) (0x238 + ((index) << 4) + (nr))
static inline unsigned long FAN_FROM_REG(u16 val)
{
if ((val >= 0xfff) || (val == 0))
return 0;
return (1350000UL / val);
}
static inline u16 FAN_TO_REG(long rpm)
{
if (rpm <= 0)
return 0x0fff;
return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
static inline unsigned long TIME_FROM_REG(u8 reg)
{
return (reg * 100);
}
static inline u8 TIME_TO_REG(unsigned long val)
{
return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
}
static inline long TEMP_FROM_REG(s8 reg)
{
return (reg * 1000);
}
static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
{
return SENSORS_LIMIT((val + (val < 0 ? -500 : 500)) / 1000, min, max);
}
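/*
* Worked examples for the conversion helpers above:
*   FAN_FROM_REG(675)  = 1350000 / 675          = 2000 RPM
*   FAN_TO_REG(2000)   = (1350000 + 1000) / 2000 = 675
*   a register value of 0 or >= 0xfff reads back as 0 RPM
*   TEMP_TO_REG(25500, -128, 127) rounds to 26, and
*   TEMP_FROM_REG(26) = 26000 millidegrees C
*/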
struct w83793_data {
struct i2c_client *lm75[2];
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_nonvolatile; /* In jiffies, last time we update the
nonvolatile registers */
u8 bank;
u8 vrm;
u8 vid[2];
u8 in[10][3]; /* Register value, read/high/low */
u8 in_low_bits[3]; /* Additional resolution for VCore A/B Vtt */
u16 has_fan; /* Only fan1- fan5 has own pins */
u16 fan[12]; /* Register value combine */
u16 fan_min[12]; /* Register value combine */
s8 temp[6][5]; /* current, crit, crit_hyst,warn, warn_hyst */
u8 temp_low_bits; /* Additional resolution TD1-TD4 */
u8 temp_mode[2]; /* byte 0: Temp D1-D4 mode each has 2 bits
byte 1: Temp R1,R2 mode, each has 1 bit */
u8 temp_critical; /* If reached all fan will be at full speed */
u8 temp_fan_map[6]; /* Temp controls which pwm fan, bit field */
u8 has_pwm;
u8 has_temp;
u8 has_vid;
u8 pwm_enable; /* Register value, each Temp has 1 bit */
u8 pwm_uptime; /* Register value */
u8 pwm_downtime; /* Register value */
u8 pwm_default; /* All fan default pwm, next poweron valid */
u8 pwm[8][3]; /* Register value */
u8 pwm_stop_time[8];
u8 temp_cruise[6];
u8 alarms[5]; /* realtime status registers */
u8 beeps[5];
u8 beep_enable;
u8 tolerance[3]; /* Temp tolerance(Smart Fan I/II) */
u8 sf2_pwm[6][7]; /* Smart FanII: Fan duty cycle */
u8 sf2_temp[6][7]; /* Smart FanII: Temp level point */
/* watchdog */
struct i2c_client *client;
struct mutex watchdog_lock;
struct list_head list; /* member of the watchdog_data_list */
struct kref kref;
struct miscdevice watchdog_miscdev;
unsigned long watchdog_is_open;
char watchdog_expect_close;
char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
unsigned int watchdog_caused_reboot;
int watchdog_timeout; /* watchdog timeout in minutes */
};
/* Somewhat ugly :( global data pointer list with all devices, so that
we can find our device data, since when using misc_register there is
no other way to get to one's device data from the open file-op; it is
also needed in the reboot notifier callback. */
static LIST_HEAD(watchdog_data_list);
/* Note this lock not only protect list access, but also data.kref access */
static DEFINE_MUTEX(watchdog_data_mutex);
/* Release our data struct when we're detached from the i2c client *and* all
references to our watchdog device are released */
static void w83793_release_resources(struct kref *ref)
{
struct w83793_data *data = container_of(ref, struct w83793_data, kref);
kfree(data);
}
static u8 w83793_read_value(struct i2c_client *client, u16 reg);
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83793_remove(struct i2c_client *client);
static void w83793_init_client(struct i2c_client *client);
static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
static const struct i2c_device_id w83793_id[] = {
{ "w83793", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83793_id);
static struct i2c_driver w83793_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "w83793",
},
.probe = w83793_probe,
.remove = w83793_remove,
.id_table = w83793_id,
.detect = w83793_detect,
.address_list = normal_i2c,
};
static ssize_t
show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
static ssize_t
show_vid(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
return sprintf(buf, "%d\n", vid_from_reg(data->vid[index], data->vrm));
}
static ssize_t
store_vrm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83793_data *data = dev_get_drvdata(dev);
data->vrm = simple_strtoul(buf, NULL, 10);
return count;
}
#define ALARM_STATUS 0
#define BEEP_ENABLE 1
static ssize_t
show_alarm_beep(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index >> 3;
int bit = sensor_attr->index & 0x07;
u8 val;
if (ALARM_STATUS == nr) {
val = (data->alarms[index] >> (bit)) & 1;
} else { /* BEEP_ENABLE */
val = (data->beeps[index] >> (bit)) & 1;
}
return sprintf(buf, "%u\n", val);
}
static ssize_t
store_beep(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index >> 3;
int shift = sensor_attr->index & 0x07;
u8 beep_bit = 1 << shift;
u8 val;
val = simple_strtoul(buf, NULL, 10);
if (val != 0 && val != 1)
return -EINVAL;
mutex_lock(&data->update_lock);
data->beeps[index] = w83793_read_value(client, W83793_REG_BEEP(index));
data->beeps[index] &= ~beep_bit;
data->beeps[index] |= val << shift;
w83793_write_value(client, W83793_REG_BEEP(index), data->beeps[index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_beep_enable(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
return sprintf(buf, "%u\n", (data->beep_enable >> 1) & 0x01);
}
static ssize_t
store_beep_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
u8 val = simple_strtoul(buf, NULL, 10);
if (val != 0 && val != 1)
return -EINVAL;
mutex_lock(&data->update_lock);
data->beep_enable = w83793_read_value(client, W83793_REG_OVT_BEEP)
& 0xfd;
data->beep_enable |= val << 1;
w83793_write_value(client, W83793_REG_OVT_BEEP, data->beep_enable);
mutex_unlock(&data->update_lock);
return count;
}
/* Write any value to clear chassis alarm */
static ssize_t
store_chassis_clear_legacy(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
u8 val;
dev_warn(dev, "Attribute chassis is deprecated, "
"use intrusion0_alarm instead\n");
mutex_lock(&data->update_lock);
val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
val |= 0x80;
w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
mutex_unlock(&data->update_lock);
return count;
}
/* Write 0 to clear chassis alarm */
static ssize_t
store_chassis_clear(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
unsigned long val;
u8 reg;
if (strict_strtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
#define FAN_INPUT 0
#define FAN_MIN 1
static ssize_t
show_fan(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
u16 val;
if (FAN_INPUT == nr) {
val = data->fan[index] & 0x0fff;
} else {
val = data->fan_min[index] & 0x0fff;
}
return sprintf(buf, "%lu\n", FAN_FROM_REG(val));
}
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
u16 val = FAN_TO_REG(simple_strtoul(buf, NULL, 10));
mutex_lock(&data->update_lock);
data->fan_min[index] = val;
w83793_write_value(client, W83793_REG_FAN_MIN(index),
(val >> 8) & 0xff);
w83793_write_value(client, W83793_REG_FAN_MIN(index) + 1, val & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
struct w83793_data *data = w83793_update_device(dev);
u16 val;
int nr = sensor_attr->nr;
int index = sensor_attr->index;
if (PWM_STOP_TIME == nr)
val = TIME_FROM_REG(data->pwm_stop_time[index]);
else
val = (data->pwm[index][nr] & 0x3f) << 2;
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
u8 val;
mutex_lock(&data->update_lock);
if (PWM_STOP_TIME == nr) {
val = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
data->pwm_stop_time[index] = val;
w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
val);
} else {
val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 0xff)
>> 2;
data->pwm[index][nr] =
w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0;
data->pwm[index][nr] |= val;
w83793_write_value(client, W83793_REG_PWM(index, nr),
data->pwm[index][nr]);
}
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
long temp = TEMP_FROM_REG(data->temp[index][nr]);
if (TEMP_READ == nr && index < 4) { /* Only TD1-TD4 have low bits */
int low = ((data->temp_low_bits >> (index * 2)) & 0x03) * 250;
temp += temp > 0 ? low : -low;
}
return sprintf(buf, "%ld\n", temp);
}
static ssize_t
store_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
long tmp = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp[index][nr] = TEMP_TO_REG(tmp, -128, 127);
w83793_write_value(client, W83793_REG_TEMP[index][nr],
data->temp[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
/*
TD1-TD4
each has 4 modes (2 bits):
0: Stop monitoring
1: Use the internal temp sensor (default)
2: Reserved
3: Use the sensor in the Intel CPU and get the result via PECI
TR1-TR2
each has 2 modes (1 bit):
0: Disable temp sensor monitoring
1: Enable temp sensor monitoring
*/
/* 0 disable, 6 PECI */
static u8 TO_TEMP_MODE[] = { 0, 0, 0, 6 };
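/*
* Sysfs mapping used below (per the standard hwmon tempX_type
* convention: 3 = thermal diode, 4 = thermistor, 6 = Intel PECI):
* writing 6 to temp1..temp4_type selects PECI, writing 3 selects the
* internal sensor for TD1-TD4, writing 4 enables TR1/TR2
* (temp5/temp6); any other value is rejected with -EINVAL.
*/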
static ssize_t
show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
u8 mask = (index < 4) ? 0x03 : 0x01;
u8 shift = (index < 4) ? (2 * index) : (index - 4);
u8 tmp;
index = (index < 4) ? 0 : 1;
tmp = (data->temp_mode[index] >> shift) & mask;
/* for the internal sensor, find out if it is a diode or a thermistor */
if (tmp == 1) {
tmp = index == 0 ? 3 : 4;
} else {
tmp = TO_TEMP_MODE[tmp];
}
return sprintf(buf, "%d\n", tmp);
}
static ssize_t
store_temp_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
u8 mask = (index < 4) ? 0x03 : 0x01;
u8 shift = (index < 4) ? (2 * index) : (index - 4);
u8 val = simple_strtoul(buf, NULL, 10);
/* transform the sysfs interface values into the table above */
if ((val == 6) && (index < 4)) {
val -= 3;
} else if ((val == 3 && index < 4)
|| (val == 4 && index >= 4)) {
/* transform diode or thermistor into internal enable */
val = !!val;
} else {
return -EINVAL;
}
index = (index < 4) ? 0 : 1;
mutex_lock(&data->update_lock);
data->temp_mode[index] =
w83793_read_value(client, W83793_REG_TEMP_MODE[index]);
data->temp_mode[index] &= ~(mask << shift);
data->temp_mode[index] |= val << shift;
w83793_write_value(client, W83793_REG_TEMP_MODE[index],
data->temp_mode[index]);
mutex_unlock(&data->update_lock);
return count;
}
#define SETUP_PWM_DEFAULT 0
#define SETUP_PWM_UPTIME 1 /* Unit in 0.1s */
#define SETUP_PWM_DOWNTIME 2 /* Unit in 0.1s */
#define SETUP_TEMP_CRITICAL 3
static ssize_t
show_sf_setup(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct w83793_data *data = w83793_update_device(dev);
u32 val = 0;
if (SETUP_PWM_DEFAULT == nr) {
val = (data->pwm_default & 0x3f) << 2;
} else if (SETUP_PWM_UPTIME == nr) {
val = TIME_FROM_REG(data->pwm_uptime);
} else if (SETUP_PWM_DOWNTIME == nr) {
val = TIME_FROM_REG(data->pwm_downtime);
} else if (SETUP_TEMP_CRITICAL == nr) {
val = TEMP_FROM_REG(data->temp_critical & 0x7f);
}
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_sf_setup(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (SETUP_PWM_DEFAULT == nr) {
data->pwm_default =
w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
data->pwm_default |= SENSORS_LIMIT(simple_strtoul(buf, NULL,
10),
0, 0xff) >> 2;
w83793_write_value(client, W83793_REG_PWM_DEFAULT,
data->pwm_default);
} else if (SETUP_PWM_UPTIME == nr) {
data->pwm_uptime = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
data->pwm_uptime += data->pwm_uptime == 0 ? 1 : 0;
w83793_write_value(client, W83793_REG_PWM_UPTIME,
data->pwm_uptime);
} else if (SETUP_PWM_DOWNTIME == nr) {
data->pwm_downtime = TIME_TO_REG(simple_strtoul(buf, NULL, 10));
data->pwm_downtime += data->pwm_downtime == 0 ? 1 : 0;
w83793_write_value(client, W83793_REG_PWM_DOWNTIME,
data->pwm_downtime);
} else { /* SETUP_TEMP_CRITICAL */
data->temp_critical =
w83793_read_value(client, W83793_REG_TEMP_CRITICAL) & 0x80;
data->temp_critical |= TEMP_TO_REG(simple_strtol(buf, NULL, 10),
0, 0x7f);
w83793_write_value(client, W83793_REG_TEMP_CRITICAL,
data->temp_critical);
}
mutex_unlock(&data->update_lock);
return count;
}
/*
Temp SmartFan control
TEMP_FAN_MAP
Each temp channel controls pwm fans through a bit field; bit 0
selects pwm1, bit 1 selects pwm2, and so on. Two or more temp
channels may control the same fan; w83793 always picks the most
critical request and applies it to that fan.
A fan that is not mapped by any of the 6 temp channels is in
manual mode.
TEMP_PWM_ENABLE
Each temp channel has its own SmartFan mode and controls the fans
selected by TEMP_FAN_MAP:
0: SmartFanII mode
1: Thermal Cruise mode
TEMP_CRUISE
Target temperature in Thermal Cruise mode; w83793 adjusts the fan
speed to keep the temperature of the target device around this
value.
TEMP_TOLERANCE
If the temp is higher or lower than the target by more than this
tolerance, w83793 speeds up or slows down the fan to keep the
temperature within the tolerance range.
*/
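/*
* Example: writing 0x05 to temp1_auto_channels_pwm maps temp channel 1
* to pwm1 and pwm3 (bits 0 and 2); a fan whose bit is clear in every
* channel's map stays in manual mode.
*/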
#define TEMP_FAN_MAP 0
#define TEMP_PWM_ENABLE 1
#define TEMP_CRUISE 2
#define TEMP_TOLERANCE 3
static ssize_t
show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
u32 val;
if (TEMP_FAN_MAP == nr) {
val = data->temp_fan_map[index];
} else if (TEMP_PWM_ENABLE == nr) {
/* +2 to transform into 2 and 3 to conform with the sysfs intf */
val = ((data->pwm_enable >> index) & 0x01) + 2;
} else if (TEMP_CRUISE == nr) {
val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
} else { /* TEMP_TOLERANCE */
val = data->tolerance[index >> 1] >> ((index & 0x01) ? 4 : 0);
val = TEMP_FROM_REG(val & 0x0f);
}
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_sf_ctrl(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
u32 val;
mutex_lock(&data->update_lock);
if (TEMP_FAN_MAP == nr) {
val = simple_strtoul(buf, NULL, 10) & 0xff;
w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
data->temp_fan_map[index] = val;
} else if (TEMP_PWM_ENABLE == nr) {
val = simple_strtoul(buf, NULL, 10);
if (2 == val || 3 == val) {
data->pwm_enable =
w83793_read_value(client, W83793_REG_PWM_ENABLE);
if (val - 2)
data->pwm_enable |= 1 << index;
else
data->pwm_enable &= ~(1 << index);
w83793_write_value(client, W83793_REG_PWM_ENABLE,
data->pwm_enable);
} else {
mutex_unlock(&data->update_lock);
return -EINVAL;
}
} else if (TEMP_CRUISE == nr) {
data->temp_cruise[index] =
w83793_read_value(client, W83793_REG_TEMP_CRUISE(index));
val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x7f);
data->temp_cruise[index] &= 0x80;
data->temp_cruise[index] |= val;
w83793_write_value(client, W83793_REG_TEMP_CRUISE(index),
data->temp_cruise[index]);
} else { /* TEMP_TOLERANCE */
int i = index >> 1;
u8 shift = (index & 0x01) ? 4 : 0;
data->tolerance[i] =
w83793_read_value(client, W83793_REG_TEMP_TOL(i));
val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x0f);
data->tolerance[i] &= ~(0x0f << shift);
data->tolerance[i] |= val << shift;
w83793_write_value(client, W83793_REG_TEMP_TOL(i),
data->tolerance[i]);
}
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_sf2_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
return sprintf(buf, "%d\n", (data->sf2_pwm[index][nr] & 0x3f) << 2);
}
static ssize_t
store_sf2_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
u8 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 0xff) >> 2;
mutex_lock(&data->update_lock);
data->sf2_pwm[index][nr] =
w83793_read_value(client, W83793_REG_SF2_PWM(index, nr)) & 0xc0;
data->sf2_pwm[index][nr] |= val;
w83793_write_value(client, W83793_REG_SF2_PWM(index, nr),
data->sf2_pwm[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_sf2_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
return sprintf(buf, "%ld\n",
TEMP_FROM_REG(data->sf2_temp[index][nr] & 0x7f));
}
static ssize_t
store_sf2_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
u8 val = TEMP_TO_REG(simple_strtol(buf, NULL, 10), 0, 0x7f);
mutex_lock(&data->update_lock);
data->sf2_temp[index][nr] =
w83793_read_value(client, W83793_REG_SF2_TEMP(index, nr)) & 0x80;
data->sf2_temp[index][nr] |= val;
w83793_write_value(client, W83793_REG_SF2_TEMP(index, nr),
data->sf2_temp[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
/* only Vcore A/B and Vtt have additional 2 bits precision */
static ssize_t
show_in(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
u16 val = data->in[index][nr];
if (index < 3) {
val <<= 2;
val += (data->in_low_bits[nr] >> (index * 2)) & 0x3;
}
/* voltage inputs 5VDD and 5VSB need a 150mV offset */
val = val * scale_in[index] + scale_in_add[index];
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_in(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
u32 val;
val =
(simple_strtoul(buf, NULL, 10) +
scale_in[index] / 2) / scale_in[index];
mutex_lock(&data->update_lock);
if (index > 2) {
/* fix the limit values of 5VDD and 5VSB to ALARM mechanism */
if (1 == nr || 2 == nr) {
val -= scale_in_add[index] / scale_in[index];
}
val = SENSORS_LIMIT(val, 0, 255);
} else {
val = SENSORS_LIMIT(val, 0, 0x3FF);
data->in_low_bits[nr] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
data->in_low_bits[nr] &= ~(0x03 << (2 * index));
data->in_low_bits[nr] |= (val & 0x03) << (2 * index);
w83793_write_value(client, W83793_REG_IN_LOW_BITS[nr],
data->in_low_bits[nr]);
val >>= 2;
}
data->in[index][nr] = val;
w83793_write_value(client, W83793_REG_IN[index][nr],
data->in[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
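/*
* Worked examples for the scaling above (results in millivolts, as
* expected by the hwmon inX_input convention):
*   Vcore A (index 0, 10-bit): raw 424 -> 424 * 2        =  848 mV
*   VSEN1   (index 3,  8-bit): raw 100 -> 100 * 16       = 1600 mV
*   5VDD    (index 7,  8-bit): raw 200 -> 200 * 24 + 150 = 4950 mV
*/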
#define NOT_USED -1
#define SENSOR_ATTR_IN(index) \
SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \
IN_READ, index), \
SENSOR_ATTR_2(in##index##_max, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_MAX, index), \
SENSOR_ATTR_2(in##index##_min, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_LOW, index), \
SENSOR_ATTR_2(in##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + ((index > 2) ? 1 : 0)), \
SENSOR_ATTR_2(in##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, \
index + ((index > 2) ? 1 : 0))
#define SENSOR_ATTR_FAN(index) \
SENSOR_ATTR_2(fan##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + 17), \
SENSOR_ATTR_2(fan##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 17), \
SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \
NULL, FAN_INPUT, index - 1), \
SENSOR_ATTR_2(fan##index##_min, S_IWUSR | S_IRUGO, \
show_fan, store_fan_min, FAN_MIN, index - 1)
#define SENSOR_ATTR_PWM(index) \
SENSOR_ATTR_2(pwm##index, S_IWUSR | S_IRUGO, show_pwm, \
store_pwm, PWM_DUTY, index - 1), \
SENSOR_ATTR_2(pwm##index##_nonstop, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_NONSTOP, index - 1), \
SENSOR_ATTR_2(pwm##index##_start, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_START, index - 1), \
SENSOR_ATTR_2(pwm##index##_stop_time, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_STOP_TIME, index - 1)
#define SENSOR_ATTR_TEMP(index) \
SENSOR_ATTR_2(temp##index##_type, S_IRUGO | S_IWUSR, \
show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
NULL, TEMP_READ, index - 1), \
SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_CRIT, index - 1), \
SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_CRIT_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_warn, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_WARN, index - 1), \
SENSOR_ATTR_2(temp##index##_warn_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_WARN_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
show_alarm_beep, NULL, ALARM_STATUS, index + 11), \
SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 11), \
SENSOR_ATTR_2(temp##index##_auto_channels_pwm, \
S_IRUGO | S_IWUSR, show_sf_ctrl, store_sf_ctrl, \
TEMP_FAN_MAP, index - 1), \
SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \
show_sf_ctrl, store_sf_ctrl, TEMP_PWM_ENABLE, \
index - 1), \
SENSOR_ATTR_2(thermal_cruise##index, S_IRUGO | S_IWUSR, \
show_sf_ctrl, store_sf_ctrl, TEMP_CRUISE, index - 1), \
SENSOR_ATTR_2(tolerance##index, S_IRUGO | S_IWUSR, show_sf_ctrl,\
store_sf_ctrl, TEMP_TOLERANCE, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 6, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 6, index - 1)
static struct sensor_device_attribute_2 w83793_sensor_attr_2[] = {
SENSOR_ATTR_IN(0),
SENSOR_ATTR_IN(1),
SENSOR_ATTR_IN(2),
SENSOR_ATTR_IN(3),
SENSOR_ATTR_IN(4),
SENSOR_ATTR_IN(5),
SENSOR_ATTR_IN(6),
SENSOR_ATTR_IN(7),
SENSOR_ATTR_IN(8),
SENSOR_ATTR_IN(9),
SENSOR_ATTR_FAN(1),
SENSOR_ATTR_FAN(2),
SENSOR_ATTR_FAN(3),
SENSOR_ATTR_FAN(4),
SENSOR_ATTR_FAN(5),
SENSOR_ATTR_PWM(1),
SENSOR_ATTR_PWM(2),
SENSOR_ATTR_PWM(3),
};
static struct sensor_device_attribute_2 w83793_temp[] = {
SENSOR_ATTR_TEMP(1),
SENSOR_ATTR_TEMP(2),
SENSOR_ATTR_TEMP(3),
SENSOR_ATTR_TEMP(4),
SENSOR_ATTR_TEMP(5),
SENSOR_ATTR_TEMP(6),
};
/* Fan6-Fan12 */
static struct sensor_device_attribute_2 w83793_left_fan[] = {
SENSOR_ATTR_FAN(6),
SENSOR_ATTR_FAN(7),
SENSOR_ATTR_FAN(8),
SENSOR_ATTR_FAN(9),
SENSOR_ATTR_FAN(10),
SENSOR_ATTR_FAN(11),
SENSOR_ATTR_FAN(12),
};
/* Pwm4-Pwm8 */
static struct sensor_device_attribute_2 w83793_left_pwm[] = {
SENSOR_ATTR_PWM(4),
SENSOR_ATTR_PWM(5),
SENSOR_ATTR_PWM(6),
SENSOR_ATTR_PWM(7),
SENSOR_ATTR_PWM(8),
};
static struct sensor_device_attribute_2 w83793_vid[] = {
SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
};
static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);
static struct sensor_device_attribute_2 sda_single_files[] = {
SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
store_chassis_clear_legacy, ALARM_STATUS, 30),
SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
store_chassis_clear, ALARM_STATUS, 30),
SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
store_beep_enable, NOT_USED, NOT_USED),
SENSOR_ATTR_2(pwm_default, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_DEFAULT, NOT_USED),
SENSOR_ATTR_2(pwm_uptime, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_UPTIME, NOT_USED),
SENSOR_ATTR_2(pwm_downtime, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_DOWNTIME, NOT_USED),
SENSOR_ATTR_2(temp_critical, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_TEMP_CRITICAL, NOT_USED),
};
static void w83793_init_client(struct i2c_client *client)
{
if (reset) {
w83793_write_value(client, W83793_REG_CONFIG, 0x80);
}
/* Start monitoring */
w83793_write_value(client, W83793_REG_CONFIG,
w83793_read_value(client, W83793_REG_CONFIG) | 0x01);
}
/*
* Watchdog routines
*/
static int watchdog_set_timeout(struct w83793_data *data, int timeout)
{
int ret, mtimeout;
mtimeout = DIV_ROUND_UP(timeout, 60);
if (mtimeout > 255)
return -EINVAL;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
data->watchdog_timeout = mtimeout;
/* Set Timeout value (in Minutes) */
w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
data->watchdog_timeout);
ret = mtimeout * 60;
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
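/*
Worked example (illustrative): watchdog_set_timeout(data, 90) rounds 90s up
to 2 minutes, writes 2 to W83793_REG_WDT_TIMEOUT and returns 120, the
effective timeout in seconds.
*/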
static int watchdog_get_timeout(struct w83793_data *data)
{
int timeout;
mutex_lock(&data->watchdog_lock);
timeout = data->watchdog_timeout * 60;
mutex_unlock(&data->watchdog_lock);
return timeout;
}
static int watchdog_trigger(struct w83793_data *data)
{
int ret = 0;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
/* Set Timeout value (in Minutes) */
w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
data->watchdog_timeout);
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
static int watchdog_enable(struct w83793_data *data)
{
int ret = 0;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
/* Set initial timeout */
w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
data->watchdog_timeout);
/* Enable Soft Watchdog */
w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0x55);
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
static int watchdog_disable(struct w83793_data *data)
{
int ret = 0;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
/* Disable Soft Watchdog */
w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0xAA);
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
static int watchdog_open(struct inode *inode, struct file *filp)
{
struct w83793_data *pos, *data = NULL;
int watchdog_is_open;
/* We get called from drivers/char/misc.c with misc_mtx held, and we
call misc_register() from w83793_probe() with watchdog_data_mutex
held. Since misc_register() takes the misc_mtx lock, this is a
possible deadlock, so we use mutex_trylock here. */
if (!mutex_trylock(&watchdog_data_mutex))
return -ERESTARTSYS;
list_for_each_entry(pos, &watchdog_data_list, list) {
if (pos->watchdog_miscdev.minor == iminor(inode)) {
data = pos;
break;
}
}
/* Check if the device is already open */
watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
/* Increase data reference counter (if not already done).
Note we can never not have found data, so we don't check for this */
if (!watchdog_is_open)
kref_get(&data->kref);
mutex_unlock(&watchdog_data_mutex);
/* If the device was already open, return an error */
if (watchdog_is_open)
return -EBUSY;
/* Enable Soft Watchdog */
watchdog_enable(data);
/* Store pointer to data into filp's private data */
filp->private_data = data;
return nonseekable_open(inode, filp);
}
static int watchdog_close(struct inode *inode, struct file *filp)
{
struct w83793_data *data = filp->private_data;
if (data->watchdog_expect_close) {
watchdog_disable(data);
data->watchdog_expect_close = 0;
} else {
watchdog_trigger(data);
dev_crit(&data->client->dev,
"unexpected close, not stopping watchdog!\n");
}
clear_bit(0, &data->watchdog_is_open);
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
mutex_unlock(&watchdog_data_mutex);
return 0;
}
static ssize_t watchdog_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
ssize_t ret;
struct w83793_data *data = filp->private_data;
if (count) {
if (!nowayout) {
size_t i;
/* Clear it in case it was set with a previous write */
data->watchdog_expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
data->watchdog_expect_close = 1;
}
}
ret = watchdog_trigger(data);
if (ret < 0)
return ret;
}
return count;
}
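/*
Typical usage (illustrative): any write to the misc device pings the
watchdog; a write containing the magic character 'V', e.g.
"echo V > /dev/watchdog", sets watchdog_expect_close so that the following
close actually stops the timer (unless nowayout is set).
*/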
static long watchdog_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_CARDRESET,
.identity = "w83793 watchdog"
};
int val, ret = 0;
struct w83793_data *data = filp->private_data;
switch (cmd) {
case WDIOC_GETSUPPORT:
if (!nowayout)
ident.options |= WDIOF_MAGICCLOSE;
if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
ret = -EFAULT;
break;
case WDIOC_GETSTATUS:
val = data->watchdog_caused_reboot ? WDIOF_CARDRESET : 0;
ret = put_user(val, (int __user *)arg);
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, (int __user *)arg);
break;
case WDIOC_KEEPALIVE:
ret = watchdog_trigger(data);
break;
case WDIOC_GETTIMEOUT:
val = watchdog_get_timeout(data);
ret = put_user(val, (int __user *)arg);
break;
case WDIOC_SETTIMEOUT:
if (get_user(val, (int __user *)arg)) {
ret = -EFAULT;
break;
}
ret = watchdog_set_timeout(data, val);
if (ret > 0)
ret = put_user(ret, (int __user *)arg);
break;
case WDIOC_SETOPTIONS:
if (get_user(val, (int __user *)arg)) {
ret = -EFAULT;
break;
}
if (val & WDIOS_DISABLECARD)
ret = watchdog_disable(data);
else if (val & WDIOS_ENABLECARD)
ret = watchdog_enable(data);
else
ret = -EINVAL;
break;
default:
ret = -ENOTTY;
}
return ret;
}
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = watchdog_open,
.release = watchdog_close,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
};
/*
* Notifier for system down
*/
static int watchdog_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
struct w83793_data *data = NULL;
if (code == SYS_DOWN || code == SYS_HALT) {
/* Disable each registered watchdog */
mutex_lock(&watchdog_data_mutex);
list_for_each_entry(data, &watchdog_data_list, list) {
if (data->watchdog_miscdev.minor)
watchdog_disable(data);
}
mutex_unlock(&watchdog_data_mutex);
}
return NOTIFY_DONE;
}
/*
* The WDT needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block watchdog_notifier = {
.notifier_call = watchdog_notify_sys,
};
/*
* Init / remove routines
*/
static int w83793_remove(struct i2c_client *client)
{
struct w83793_data *data = i2c_get_clientdata(client);
struct device *dev = &client->dev;
int i, tmp;
/* Unregister the watchdog (if registered) */
if (data->watchdog_miscdev.minor) {
misc_deregister(&data->watchdog_miscdev);
if (data->watchdog_is_open) {
dev_warn(&client->dev,
"i2c client detached with watchdog open! "
"Stopping watchdog.\n");
watchdog_disable(data);
}
mutex_lock(&watchdog_data_mutex);
list_del(&data->list);
mutex_unlock(&watchdog_data_mutex);
/* Tell the watchdog code the client is gone */
mutex_lock(&data->watchdog_lock);
data->client = NULL;
mutex_unlock(&data->watchdog_lock);
}
/* Reset Configuration Register to Disable Watch Dog Registers */
tmp = w83793_read_value(client, W83793_REG_CONFIG);
w83793_write_value(client, W83793_REG_CONFIG, tmp & ~0x04);
unregister_reboot_notifier(&watchdog_notifier);
hwmon_device_unregister(data->hwmon_dev);
for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
device_remove_file(dev,
&w83793_sensor_attr_2[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
device_remove_file(dev, &sda_single_files[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
device_remove_file(dev, &w83793_vid[i].dev_attr);
device_remove_file(dev, &dev_attr_vrm);
for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
device_remove_file(dev, &w83793_left_fan[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
mutex_unlock(&watchdog_data_mutex);
return 0;
}
static int
w83793_detect_subclients(struct i2c_client *client)
{
int i, id, err;
int address = client->addr;
u8 tmp;
struct i2c_adapter *adapter = client->adapter;
struct w83793_data *data = i2c_get_clientdata(client);
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
for (i = 2; i <= 3; i++) {
if (force_subclients[i] < 0x48
|| force_subclients[i] > 0x4f) {
dev_err(&client->dev,
"invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
err = -EINVAL;
goto ERROR_SC_0;
}
}
w83793_write_value(client, W83793_REG_I2C_SUBADDR,
(force_subclients[2] & 0x07) |
((force_subclients[3] & 0x07) << 4));
}
tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
if (!(tmp & 0x08)) {
data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7));
}
if (!(tmp & 0x80)) {
if ((data->lm75[0] != NULL)
&& ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
dev_err(&client->dev,
"duplicate addresses 0x%x, "
"use force_subclients\n", data->lm75[0]->addr);
err = -ENODEV;
goto ERROR_SC_1;
}
data->lm75[1] = i2c_new_dummy(adapter,
0x48 + ((tmp >> 4) & 0x7));
}
return 0;
/* Undo inits in case of errors */
ERROR_SC_1:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
u8 tmp, bank, chip_id;
struct i2c_adapter *adapter = client->adapter;
unsigned short address = client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
return -ENODEV;
}
bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
tmp = bank & 0x80 ? 0x5c : 0xa3;
/* Check Winbond vendor ID */
if (tmp != i2c_smbus_read_byte_data(client, W83793_REG_VENDORID)) {
pr_debug("w83793: Detection failed at check vendor id\n");
return -ENODEV;
}
/* If Winbond chip, address of chip and W83793_REG_I2C_ADDR
should match */
if ((bank & 0x07) == 0
&& i2c_smbus_read_byte_data(client, W83793_REG_I2C_ADDR) !=
(address << 1)) {
pr_debug("w83793: Detection failed at check i2c addr\n");
return -ENODEV;
}
/* Determine the chip type now */
chip_id = i2c_smbus_read_byte_data(client, W83793_REG_CHIPID);
if (chip_id != 0x7b)
return -ENODEV;
strlcpy(info->type, "w83793", I2C_NAME_SIZE);
return 0;
}
static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
struct w83793_data *data;
int i, tmp, val, err;
int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5;
int files_temp = ARRAY_SIZE(w83793_temp) / 6;
data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
mutex_init(&data->update_lock);
mutex_init(&data->watchdog_lock);
INIT_LIST_HEAD(&data->list);
kref_init(&data->kref);
/* Store the client pointer in our data struct for watchdog usage
(where the client is found through a data ptr instead of the
other way around) */
data->client = client;
err = w83793_detect_subclients(client);
if (err)
goto free_mem;
/* Initialize the chip */
w83793_init_client(client);
/*
Only fans 1-5 have their own input pins,
and only pwm 1-3 have their own pins
*/
data->has_fan = 0x1f;
data->has_pwm = 0x07;
tmp = w83793_read_value(client, W83793_REG_MFC);
val = w83793_read_value(client, W83793_REG_FANIN_CTRL);
/* check the function of pins 49-56 */
if (tmp & 0x80) {
data->has_vid |= 0x2; /* has VIDB */
} else {
data->has_pwm |= 0x18; /* pwm 4,5 */
if (val & 0x01) { /* fan 6 */
data->has_fan |= 0x20;
data->has_pwm |= 0x20;
}
if (val & 0x02) { /* fan 7 */
data->has_fan |= 0x40;
data->has_pwm |= 0x40;
}
if (!(tmp & 0x40) && (val & 0x04)) { /* fan 8 */
data->has_fan |= 0x80;
data->has_pwm |= 0x80;
}
}
/* check the function of pins 37-40 */
if (!(tmp & 0x29))
data->has_vid |= 0x1; /* has VIDA */
if (0x08 == (tmp & 0x0c)) {
if (val & 0x08) /* fan 9 */
data->has_fan |= 0x100;
if (val & 0x10) /* fan 10 */
data->has_fan |= 0x200;
}
if (0x20 == (tmp & 0x30)) {
if (val & 0x20) /* fan 11 */
data->has_fan |= 0x400;
if (val & 0x40) /* fan 12 */
data->has_fan |= 0x800;
}
if ((tmp & 0x01) && (val & 0x04)) { /* fan 8, second location */
data->has_fan |= 0x80;
data->has_pwm |= 0x80;
}
tmp = w83793_read_value(client, W83793_REG_FANIN_SEL);
if ((tmp & 0x01) && (val & 0x08)) { /* fan 9, second location */
data->has_fan |= 0x100;
}
if ((tmp & 0x02) && (val & 0x10)) { /* fan 10, second location */
data->has_fan |= 0x200;
}
if ((tmp & 0x04) && (val & 0x20)) { /* fan 11, second location */
data->has_fan |= 0x400;
}
if ((tmp & 0x08) && (val & 0x40)) { /* fan 12, second location */
data->has_fan |= 0x800;
}
/* check the temp1-6 mode, ignore former AMDSI selected inputs */
tmp = w83793_read_value(client, W83793_REG_TEMP_MODE[0]);
if (tmp & 0x01)
data->has_temp |= 0x01;
if (tmp & 0x04)
data->has_temp |= 0x02;
if (tmp & 0x10)
data->has_temp |= 0x04;
if (tmp & 0x40)
data->has_temp |= 0x08;
tmp = w83793_read_value(client, W83793_REG_TEMP_MODE[1]);
if (tmp & 0x01)
data->has_temp |= 0x10;
if (tmp & 0x02)
data->has_temp |= 0x20;
/* Register sysfs hooks */
for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) {
err = device_create_file(dev,
&w83793_sensor_attr_2[i].dev_attr);
if (err)
goto exit_remove;
}
for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) {
if (!(data->has_vid & (1 << i)))
continue;
err = device_create_file(dev, &w83793_vid[i].dev_attr);
if (err)
goto exit_remove;
}
if (data->has_vid) {
data->vrm = vid_which_vrm();
err = device_create_file(dev, &dev_attr_vrm);
if (err)
goto exit_remove;
}
for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
err = device_create_file(dev, &sda_single_files[i].dev_attr);
if (err)
goto exit_remove;
}
for (i = 0; i < 6; i++) {
int j;
if (!(data->has_temp & (1 << i)))
continue;
for (j = 0; j < files_temp; j++) {
err = device_create_file(dev,
&w83793_temp[(i) * files_temp
+ j].dev_attr);
if (err)
goto exit_remove;
}
}
for (i = 5; i < 12; i++) {
int j;
if (!(data->has_fan & (1 << i)))
continue;
for (j = 0; j < files_fan; j++) {
err = device_create_file(dev,
&w83793_left_fan[(i - 5) * files_fan
+ j].dev_attr);
if (err)
goto exit_remove;
}
}
for (i = 3; i < 8; i++) {
int j;
if (!(data->has_pwm & (1 << i)))
continue;
for (j = 0; j < files_pwm; j++) {
err = device_create_file(dev,
&w83793_left_pwm[(i - 3) * files_pwm
+ j].dev_attr);
if (err)
goto exit_remove;
}
}
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
/* Watchdog initialization */
/* Register boot notifier */
err = register_reboot_notifier(&watchdog_notifier);
if (err != 0) {
dev_err(&client->dev,
"cannot register reboot notifier (err=%d)\n", err);
goto exit_devunreg;
}
/* Enable Watchdog registers.
Set Configuration Register to Enable Watch Dog Registers
(Bit 2) = XXXX, X1XX. */
tmp = w83793_read_value(client, W83793_REG_CONFIG);
w83793_write_value(client, W83793_REG_CONFIG, tmp | 0x04);
/* Set the default watchdog timeout */
data->watchdog_timeout = timeout;
/* Check if the last reboot was caused by the watchdog */
data->watchdog_caused_reboot =
w83793_read_value(data->client, W83793_REG_WDT_STATUS) & 0x01;
/* Disable Soft Watchdog during initialization */
watchdog_disable(data);
/* We take the data_mutex lock early so that watchdog_open() cannot
run when misc_register() has completed, but we've not yet added
our data to the watchdog_data_list (and set the default timeout) */
mutex_lock(&watchdog_data_mutex);
for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
/* Register our watchdog part */
snprintf(data->watchdog_name, sizeof(data->watchdog_name),
"watchdog%c", (i == 0) ? '\0' : ('0' + i));
data->watchdog_miscdev.name = data->watchdog_name;
data->watchdog_miscdev.fops = &watchdog_fops;
data->watchdog_miscdev.minor = watchdog_minors[i];
err = misc_register(&data->watchdog_miscdev);
if (err == -EBUSY)
continue;
if (err) {
data->watchdog_miscdev.minor = 0;
dev_err(&client->dev,
"Registering watchdog chardev: %d\n", err);
break;
}
list_add(&data->list, &watchdog_data_list);
dev_info(&client->dev,
"Registered watchdog chardev major 10, minor: %d\n",
watchdog_minors[i]);
break;
}
if (i == ARRAY_SIZE(watchdog_minors)) {
data->watchdog_miscdev.minor = 0;
dev_warn(&client->dev, "Couldn't register watchdog chardev "
"(due to no free minor)\n");
}
mutex_unlock(&watchdog_data_mutex);
return 0;
/* Unregister hwmon device */
exit_devunreg:
hwmon_device_unregister(data->hwmon_dev);
/* Unregister sysfs hooks */
exit_remove:
for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
device_remove_file(dev, &w83793_sensor_attr_2[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
device_remove_file(dev, &sda_single_files[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
device_remove_file(dev, &w83793_vid[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
device_remove_file(dev, &w83793_left_fan[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
free_mem:
kfree(data);
exit:
return err;
}
static void w83793_update_nonvolatile(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
int i, j;
/*
These are somewhat "stable" registers; updating them on every poll
takes too much time to be worthwhile. Update them only at a long
interval instead.
*/
if (!(time_after(jiffies, data->last_nonvolatile + HZ * 300)
|| !data->valid))
return;
/* update voltage limits */
for (i = 1; i < 3; i++) {
for (j = 0; j < ARRAY_SIZE(data->in); j++) {
data->in[j][i] =
w83793_read_value(client, W83793_REG_IN[j][i]);
}
data->in_low_bits[i] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[i]);
}
for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
/* Update the Fan measured value and limits */
if (!(data->has_fan & (1 << i))) {
continue;
}
data->fan_min[i] =
w83793_read_value(client, W83793_REG_FAN_MIN(i)) << 8;
data->fan_min[i] |=
w83793_read_value(client, W83793_REG_FAN_MIN(i) + 1);
}
for (i = 0; i < ARRAY_SIZE(data->temp_fan_map); i++) {
if (!(data->has_temp & (1 << i)))
continue;
data->temp_fan_map[i] =
w83793_read_value(client, W83793_REG_TEMP_FAN_MAP(i));
for (j = 1; j < 5; j++) {
data->temp[i][j] =
w83793_read_value(client, W83793_REG_TEMP[i][j]);
}
data->temp_cruise[i] =
w83793_read_value(client, W83793_REG_TEMP_CRUISE(i));
for (j = 0; j < 7; j++) {
data->sf2_pwm[i][j] =
w83793_read_value(client, W83793_REG_SF2_PWM(i, j));
data->sf2_temp[i][j] =
w83793_read_value(client,
W83793_REG_SF2_TEMP(i, j));
}
}
for (i = 0; i < ARRAY_SIZE(data->temp_mode); i++)
data->temp_mode[i] =
w83793_read_value(client, W83793_REG_TEMP_MODE[i]);
for (i = 0; i < ARRAY_SIZE(data->tolerance); i++) {
data->tolerance[i] =
w83793_read_value(client, W83793_REG_TEMP_TOL(i));
}
for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
if (!(data->has_pwm & (1 << i)))
continue;
data->pwm[i][PWM_NONSTOP] =
w83793_read_value(client, W83793_REG_PWM(i, PWM_NONSTOP));
data->pwm[i][PWM_START] =
w83793_read_value(client, W83793_REG_PWM(i, PWM_START));
data->pwm_stop_time[i] =
w83793_read_value(client, W83793_REG_PWM_STOP_TIME(i));
}
data->pwm_default = w83793_read_value(client, W83793_REG_PWM_DEFAULT);
data->pwm_enable = w83793_read_value(client, W83793_REG_PWM_ENABLE);
data->pwm_uptime = w83793_read_value(client, W83793_REG_PWM_UPTIME);
data->pwm_downtime = w83793_read_value(client, W83793_REG_PWM_DOWNTIME);
data->temp_critical =
w83793_read_value(client, W83793_REG_TEMP_CRITICAL);
data->beep_enable = w83793_read_value(client, W83793_REG_OVT_BEEP);
for (i = 0; i < ARRAY_SIZE(data->beeps); i++) {
data->beeps[i] = w83793_read_value(client, W83793_REG_BEEP(i));
}
data->last_nonvolatile = jiffies;
}
static struct w83793_data *w83793_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
int i;
mutex_lock(&data->update_lock);
if (!(time_after(jiffies, data->last_updated + HZ * 2)
|| !data->valid))
goto END;
/* Update the voltages measured value and limits */
for (i = 0; i < ARRAY_SIZE(data->in); i++)
data->in[i][IN_READ] =
w83793_read_value(client, W83793_REG_IN[i][IN_READ]);
data->in_low_bits[IN_READ] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[IN_READ]);
for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
if (!(data->has_fan & (1 << i))) {
continue;
}
data->fan[i] =
w83793_read_value(client, W83793_REG_FAN(i)) << 8;
data->fan[i] |=
w83793_read_value(client, W83793_REG_FAN(i) + 1);
}
for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
if (!(data->has_temp & (1 << i)))
continue;
data->temp[i][TEMP_READ] =
w83793_read_value(client, W83793_REG_TEMP[i][TEMP_READ]);
}
data->temp_low_bits =
w83793_read_value(client, W83793_REG_TEMP_LOW_BITS);
for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
if (data->has_pwm & (1 << i))
data->pwm[i][PWM_DUTY] =
w83793_read_value(client,
W83793_REG_PWM(i, PWM_DUTY));
}
for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
data->alarms[i] =
w83793_read_value(client, W83793_REG_ALARM(i));
if (data->has_vid & 0x01)
data->vid[0] = w83793_read_value(client, W83793_REG_VID_INA);
if (data->has_vid & 0x02)
data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB);
w83793_update_nonvolatile(dev);
data->last_updated = jiffies;
data->valid = 1;
END:
mutex_unlock(&data->update_lock);
return data;
}
/* Ignore the possibility that somebody changes the bank outside the driver.
Must be called with data->update_lock held, except during initialization */
static u8 w83793_read_value(struct i2c_client *client, u16 reg)
{
struct w83793_data *data = i2c_get_clientdata(client);
u8 res = 0xff;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
if (data->bank != new_bank) {
if (i2c_smbus_write_byte_data
(client, W83793_REG_BANKSEL, new_bank) >= 0)
data->bank = new_bank;
else {
dev_err(&client->dev,
"set bank to %d failed, fall back "
"to bank %d, read reg 0x%x error\n",
new_bank, data->bank, reg);
res = 0x0; /* read 0x0 from the chip */
goto END;
}
}
res = i2c_smbus_read_byte_data(client, reg & 0xff);
END:
return res;
}
/* Must be called with data->update_lock held, except during initialization */
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value)
{
struct w83793_data *data = i2c_get_clientdata(client);
int res;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
if (data->bank != new_bank) {
if ((res = i2c_smbus_write_byte_data
(client, W83793_REG_BANKSEL, new_bank)) >= 0)
data->bank = new_bank;
else {
dev_err(&client->dev,
"set bank to %d failed, fall back "
"to bank %d, write reg 0x%x error\n",
new_bank, data->bank, reg);
goto END;
}
}
res = i2c_smbus_write_byte_data(client, reg & 0xff, value);
END:
return res;
}
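/*
Example of the bank handling above (illustrative): a 16-bit register
address such as 0x3FF selects bank 3 (reg >> 8, merged with the preserved
upper bits of BANKSEL) and then accesses offset 0xFF within that bank;
the bank switch is skipped when data->bank already matches.
*/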
static int __init sensors_w83793_init(void)
{
return i2c_add_driver(&w83793_driver);
}
static void __exit sensors_w83793_exit(void)
{
i2c_del_driver(&w83793_driver);
}
MODULE_AUTHOR("Yuan Mu, Sven Anders");
MODULE_DESCRIPTION("w83793 driver");
MODULE_LICENSE("GPL");
module_init(sensors_w83793_init);
module_exit(sensors_w83793_exit);
| gpl-2.0 |
Anik1199/Kernel_taoshan | sound/soc/pxa/pxa-ssp.c | 4107 | 20043 | /*
* pxa-ssp.c -- ALSA Soc Audio Layer
*
* Copyright 2005,2008 Wolfson Microelectronics PLC.
* Author: Liam Girdwood
* Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* TODO:
* o Test network mode for > 16bit sample size
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pxa2xx_ssp.h>
#include <asm/irq.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/pxa2xx-lib.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/audio.h>
#include "../../arm/pxa2xx-pcm.h"
#include "pxa-ssp.h"
/*
* SSP audio private data
*/
struct ssp_priv {
struct ssp_device *ssp;
unsigned int sysclk;
int dai_fmt;
#ifdef CONFIG_PM
uint32_t cr0;
uint32_t cr1;
uint32_t to;
uint32_t psp;
#endif
};
static void dump_registers(struct ssp_device *ssp)
{
dev_dbg(&ssp->pdev->dev, "SSCR0 0x%08x SSCR1 0x%08x SSTO 0x%08x\n",
pxa_ssp_read_reg(ssp, SSCR0), pxa_ssp_read_reg(ssp, SSCR1),
pxa_ssp_read_reg(ssp, SSTO));
dev_dbg(&ssp->pdev->dev, "SSPSP 0x%08x SSSR 0x%08x SSACD 0x%08x\n",
pxa_ssp_read_reg(ssp, SSPSP), pxa_ssp_read_reg(ssp, SSSR),
pxa_ssp_read_reg(ssp, SSACD));
}
static void pxa_ssp_enable(struct ssp_device *ssp)
{
uint32_t sscr0;
sscr0 = __raw_readl(ssp->mmio_base + SSCR0) | SSCR0_SSE;
__raw_writel(sscr0, ssp->mmio_base + SSCR0);
}
static void pxa_ssp_disable(struct ssp_device *ssp)
{
uint32_t sscr0;
sscr0 = __raw_readl(ssp->mmio_base + SSCR0) & ~SSCR0_SSE;
__raw_writel(sscr0, ssp->mmio_base + SSCR0);
}
struct pxa2xx_pcm_dma_data {
struct pxa2xx_pcm_dma_params params;
char name[20];
};
static struct pxa2xx_pcm_dma_params *
pxa_ssp_get_dma_params(struct ssp_device *ssp, int width4, int out)
{
struct pxa2xx_pcm_dma_data *dma;
dma = kzalloc(sizeof(struct pxa2xx_pcm_dma_data), GFP_KERNEL);
if (dma == NULL)
return NULL;
snprintf(dma->name, 20, "SSP%d PCM %s %s", ssp->port_id,
width4 ? "32-bit" : "16-bit", out ? "out" : "in");
dma->params.name = dma->name;
dma->params.drcmr = &DRCMR(out ? ssp->drcmr_tx : ssp->drcmr_rx);
dma->params.dcmd = (out ? (DCMD_INCSRCADDR | DCMD_FLOWTRG) :
(DCMD_INCTRGADDR | DCMD_FLOWSRC)) |
(width4 ? DCMD_WIDTH4 : DCMD_WIDTH2) | DCMD_BURST16;
dma->params.dev_addr = ssp->phys_base + SSDR;
return &dma->params;
}
static int pxa_ssp_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
int ret = 0;
if (!cpu_dai->active) {
clk_enable(ssp->clk);
pxa_ssp_disable(ssp);
}
kfree(snd_soc_dai_get_dma_data(cpu_dai, substream));
snd_soc_dai_set_dma_data(cpu_dai, substream, NULL);
return ret;
}
static void pxa_ssp_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
if (!cpu_dai->active) {
pxa_ssp_disable(ssp);
clk_disable(ssp->clk);
}
kfree(snd_soc_dai_get_dma_data(cpu_dai, substream));
snd_soc_dai_set_dma_data(cpu_dai, substream, NULL);
}
#ifdef CONFIG_PM
static int pxa_ssp_suspend(struct snd_soc_dai *cpu_dai)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
if (!cpu_dai->active)
clk_enable(ssp->clk);
priv->cr0 = __raw_readl(ssp->mmio_base + SSCR0);
priv->cr1 = __raw_readl(ssp->mmio_base + SSCR1);
priv->to = __raw_readl(ssp->mmio_base + SSTO);
priv->psp = __raw_readl(ssp->mmio_base + SSPSP);
pxa_ssp_disable(ssp);
clk_disable(ssp->clk);
return 0;
}
static int pxa_ssp_resume(struct snd_soc_dai *cpu_dai)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
uint32_t sssr = SSSR_ROR | SSSR_TUR | SSSR_BCE;
clk_enable(ssp->clk);
__raw_writel(sssr, ssp->mmio_base + SSSR);
__raw_writel(priv->cr0 & ~SSCR0_SSE, ssp->mmio_base + SSCR0);
__raw_writel(priv->cr1, ssp->mmio_base + SSCR1);
__raw_writel(priv->to, ssp->mmio_base + SSTO);
__raw_writel(priv->psp, ssp->mmio_base + SSPSP);
if (cpu_dai->active)
pxa_ssp_enable(ssp);
else
clk_disable(ssp->clk);
return 0;
}
#else
#define pxa_ssp_suspend NULL
#define pxa_ssp_resume NULL
#endif
/**
* pxa_ssp_set_scr - set SSP clock divider
* @ssp: SSP device
* @div: serial clock rate divider
*/
static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div)
{
u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) {
sscr0 &= ~0x0000ff00;
sscr0 |= ((div - 2)/2) << 8; /* 2..512 */
} else {
sscr0 &= ~0x000fff00;
sscr0 |= (div - 1) << 8; /* 1..4096 */
}
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
}
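/*
* Illustrative example: pxa_ssp_set_scr(ssp, 64) programs ((64 - 2) / 2)
* = 31 into SSCR0[15:8] on a PXA25x-type port, and (64 - 1) = 63 into
* SSCR0[19:8] otherwise.
*/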
/**
* pxa_ssp_get_scr - get SSP clock divider
* @ssp: SSP device
*/
static u32 pxa_ssp_get_scr(struct ssp_device *ssp)
{
u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
u32 div;
if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP)
div = ((sscr0 >> 8) & 0xff) * 2 + 2;
else
div = ((sscr0 >> 8) & 0xfff) + 1;
return div;
}
/*
* Set the SSP ports SYSCLK.
*/
static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq, int dir)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
int val;
u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0) &
~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS);
dev_dbg(&ssp->pdev->dev,
"pxa_ssp_set_dai_sysclk id: %d, clk_id %d, freq %u\n",
cpu_dai->id, clk_id, freq);
switch (clk_id) {
case PXA_SSP_CLK_NET_PLL:
sscr0 |= SSCR0_MOD;
break;
case PXA_SSP_CLK_PLL:
/* Internal PLL is fixed */
if (cpu_is_pxa25x())
priv->sysclk = 1843200;
else
priv->sysclk = 13000000;
break;
case PXA_SSP_CLK_EXT:
priv->sysclk = freq;
sscr0 |= SSCR0_ECS;
break;
case PXA_SSP_CLK_NET:
priv->sysclk = freq;
sscr0 |= SSCR0_NCS | SSCR0_MOD;
break;
case PXA_SSP_CLK_AUDIO:
priv->sysclk = 0;
pxa_ssp_set_scr(ssp, 1);
sscr0 |= SSCR0_ACS;
break;
default:
return -ENODEV;
}
/* The SSP clock must be disabled when changing SSP clock mode
* on PXA2xx. On PXA3xx it must be enabled when doing so. */
if (!cpu_is_pxa3xx())
clk_disable(ssp->clk);
val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0;
pxa_ssp_write_reg(ssp, SSCR0, val);
if (!cpu_is_pxa3xx())
clk_enable(ssp->clk);
return 0;
}
/*
* Set the SSP clock dividers.
*/
static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
int val;
switch (div_id) {
case PXA_SSP_AUDIO_DIV_ACDS:
val = (pxa_ssp_read_reg(ssp, SSACD) & ~0x7) | SSACD_ACDS(div);
pxa_ssp_write_reg(ssp, SSACD, val);
break;
case PXA_SSP_AUDIO_DIV_SCDB:
val = pxa_ssp_read_reg(ssp, SSACD);
val &= ~SSACD_SCDB;
#if defined(CONFIG_PXA3xx)
if (cpu_is_pxa3xx())
val &= ~SSACD_SCDX8;
#endif
switch (div) {
case PXA_SSP_CLK_SCDB_1:
val |= SSACD_SCDB;
break;
case PXA_SSP_CLK_SCDB_4:
break;
#if defined(CONFIG_PXA3xx)
case PXA_SSP_CLK_SCDB_8:
if (cpu_is_pxa3xx())
val |= SSACD_SCDX8;
else
return -EINVAL;
break;
#endif
default:
return -EINVAL;
}
pxa_ssp_write_reg(ssp, SSACD, val);
break;
case PXA_SSP_DIV_SCR:
pxa_ssp_set_scr(ssp, div);
break;
default:
return -ENODEV;
}
return 0;
}
/*
* Configure the PLL frequency (pxa27x and, afaik, pxa320 only)
*/
static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70;
#if defined(CONFIG_PXA3xx)
if (cpu_is_pxa3xx())
pxa_ssp_write_reg(ssp, SSACDD, 0);
#endif
switch (freq_out) {
case 5622000:
break;
case 11345000:
ssacd |= (0x1 << 4);
break;
case 12235000:
ssacd |= (0x2 << 4);
break;
case 14857000:
ssacd |= (0x3 << 4);
break;
case 32842000:
ssacd |= (0x4 << 4);
break;
case 48000000:
ssacd |= (0x5 << 4);
break;
case 0:
/* Disable */
break;
default:
#ifdef CONFIG_PXA3xx
/* PXA3xx has a clock ditherer which can be used to generate
* a wider range of frequencies - calculate a value for it.
*/
if (cpu_is_pxa3xx()) {
u32 val;
u64 tmp = 19968;
tmp *= 1000000;
do_div(tmp, freq_out);
val = tmp;
val = (val << 16) | 64;
pxa_ssp_write_reg(ssp, SSACDD, val);
ssacd |= (0x6 << 4);
dev_dbg(&ssp->pdev->dev,
"Using SSACDD %x to supply %uHz\n",
val, freq_out);
break;
}
#endif
return -EINVAL;
}
pxa_ssp_write_reg(ssp, SSACD, ssacd);
return 0;
}
/*
* Set the active slots in TDM/Network mode
*/
static int pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
u32 sscr0;
sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
sscr0 &= ~(SSCR0_MOD | SSCR0_SlotsPerFrm(8) | SSCR0_EDSS | SSCR0_DSS);
/* set slot width */
if (slot_width > 16)
sscr0 |= SSCR0_EDSS | SSCR0_DataSize(slot_width - 16);
else
sscr0 |= SSCR0_DataSize(slot_width);
if (slots > 1) {
/* enable network mode */
sscr0 |= SSCR0_MOD;
/* set number of active slots */
sscr0 |= SSCR0_SlotsPerFrm(slots);
/* set active slot mask */
pxa_ssp_write_reg(ssp, SSTSA, tx_mask);
pxa_ssp_write_reg(ssp, SSRSA, rx_mask);
}
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
return 0;
}
/*
* Tristate the SSP DAI lines
*/
static int pxa_ssp_set_dai_tristate(struct snd_soc_dai *cpu_dai,
int tristate)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
u32 sscr1;
sscr1 = pxa_ssp_read_reg(ssp, SSCR1);
if (tristate)
sscr1 &= ~SSCR1_TTE;
else
sscr1 |= SSCR1_TTE;
pxa_ssp_write_reg(ssp, SSCR1, sscr1);
return 0;
}
/*
* Set up the SSP DAI format.
* The SSP Port must be inactive before calling this function as the
* physical interface format is changed.
*/
static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
u32 sscr0, sscr1, sspsp, scfr;
/* check if we need to change anything at all */
if (priv->dai_fmt == fmt)
return 0;
/* we can only change the settings if the port is not in use */
if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) {
dev_err(&ssp->pdev->dev,
"can't change hardware dai format: stream is in use");
return -EINVAL;
}
/* reset port settings */
sscr0 = pxa_ssp_read_reg(ssp, SSCR0) &
~(SSCR0_ECS | SSCR0_NCS | SSCR0_MOD | SSCR0_ACS);
sscr1 = SSCR1_RxTresh(8) | SSCR1_TxTresh(7);
sspsp = 0;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
sscr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR | SSCR1_SCFR;
break;
case SND_SOC_DAIFMT_CBM_CFS:
sscr1 |= SSCR1_SCLKDIR | SSCR1_SCFR;
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
sspsp |= SSPSP_SFRMP;
break;
case SND_SOC_DAIFMT_NB_IF:
break;
case SND_SOC_DAIFMT_IB_IF:
sspsp |= SSPSP_SCMODE(2);
break;
case SND_SOC_DAIFMT_IB_NF:
sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
sscr0 |= SSCR0_PSP;
sscr1 |= SSCR1_RWOT | SSCR1_TRAIL;
/* See hw_params() */
break;
case SND_SOC_DAIFMT_DSP_A:
sspsp |= SSPSP_FSRT;
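/* fall through: DSP_A also uses the DSP_B settings below */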
case SND_SOC_DAIFMT_DSP_B:
sscr0 |= SSCR0_MOD | SSCR0_PSP;
sscr1 |= SSCR1_TRAIL | SSCR1_RWOT;
break;
default:
return -EINVAL;
}
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
pxa_ssp_write_reg(ssp, SSCR1, sscr1);
pxa_ssp_write_reg(ssp, SSPSP, sspsp);
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
case SND_SOC_DAIFMT_CBM_CFS:
scfr = pxa_ssp_read_reg(ssp, SSCR1) | SSCR1_SCFR;
pxa_ssp_write_reg(ssp, SSCR1, scfr);
while (pxa_ssp_read_reg(ssp, SSSR) & SSSR_BSY)
cpu_relax();
break;
}
dump_registers(ssp);
/* Since we are configuring the timings for the format by hand
* we have to defer some things until hw_params() where we
* know parameters like the sample size.
*/
priv->dai_fmt = fmt;
return 0;
}
/*
* Set the SSP audio DMA parameters and sample size.
* Can be called multiple times by oss emulation.
*/
static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
int chn = params_channels(params);
u32 sscr0;
u32 sspsp;
int width = snd_pcm_format_physical_width(params_format(params));
int ttsa = pxa_ssp_read_reg(ssp, SSTSA) & 0xf;
struct pxa2xx_pcm_dma_params *dma_data;
dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);
/* generate correct DMA params */
kfree(dma_data);
/* Network mode with one active slot (ttsa == 1) can be used
* to force 16-bit frame width on the wire (for S16_LE), even
* with two channels. Use 16-bit DMA transfers for this case.
*/
dma_data = pxa_ssp_get_dma_params(ssp,
((chn == 2) && (ttsa != 1)) || (width == 32),
substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
/* we can only change the settings if the port is not in use */
if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE)
return 0;
/* clear selected SSP bits */
sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~(SSCR0_DSS | SSCR0_EDSS);
/* bit size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
#ifdef CONFIG_PXA3xx
if (cpu_is_pxa3xx())
sscr0 |= SSCR0_FPCKE;
#endif
sscr0 |= SSCR0_DataSize(16);
break;
case SNDRV_PCM_FORMAT_S24_LE:
sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(8));
break;
case SNDRV_PCM_FORMAT_S32_LE:
sscr0 |= (SSCR0_EDSS | SSCR0_DataSize(16));
break;
}
pxa_ssp_write_reg(ssp, SSCR0, sscr0);
switch (priv->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
sspsp = pxa_ssp_read_reg(ssp, SSPSP);
if ((pxa_ssp_get_scr(ssp) == 4) && (width == 16)) {
/* This is a special case where the bitclk is 64fs
* and we're not dealing with 2*32 bits of audio
* samples.
*
* The SSP values used for that are all found out by
* trying and failing a lot; some of the registers
* needed for that mode are only available on PXA3xx.
*/
#ifdef CONFIG_PXA3xx
if (!cpu_is_pxa3xx())
return -EINVAL;
sspsp |= SSPSP_SFRMWDTH(width * 2);
sspsp |= SSPSP_SFRMDLY(width * 4);
sspsp |= SSPSP_EDMYSTOP(3);
sspsp |= SSPSP_DMYSTOP(3);
sspsp |= SSPSP_DMYSTRT(1);
#else
return -EINVAL;
#endif
} else {
/* The frame width is the width the LRCLK is
* asserted for; the delay is expressed in
* half cycle units. We need the extra cycle
* because the data starts clocking out one BCLK
* after LRCLK changes polarity.
*/
sspsp |= SSPSP_SFRMWDTH(width + 1);
sspsp |= SSPSP_SFRMDLY((width + 1) * 2);
sspsp |= SSPSP_DMYSTRT(1);
}
pxa_ssp_write_reg(ssp, SSPSP, sspsp);
break;
default:
break;
}
/* When we use a network mode, we always require TDM slots
* - complain loudly and fail if they've not been set up yet.
*/
if ((sscr0 & SSCR0_MOD) && !ttsa) {
dev_err(&ssp->pdev->dev, "No TDM timeslot configured\n");
return -EINVAL;
}
dump_registers(ssp);
return 0;
}
static void pxa_ssp_set_running_bit(struct snd_pcm_substream *substream,
struct ssp_device *ssp, int value)
{
uint32_t sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
uint32_t sscr1 = pxa_ssp_read_reg(ssp, SSCR1);
uint32_t sspsp = pxa_ssp_read_reg(ssp, SSPSP);
uint32_t sssr = pxa_ssp_read_reg(ssp, SSSR);
if (value && (sscr0 & SSCR0_SSE))
pxa_ssp_write_reg(ssp, SSCR0, sscr0 & ~SSCR0_SSE);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (value)
sscr1 |= SSCR1_TSRE;
else
sscr1 &= ~SSCR1_TSRE;
} else {
if (value)
sscr1 |= SSCR1_RSRE;
else
sscr1 &= ~SSCR1_RSRE;
}
pxa_ssp_write_reg(ssp, SSCR1, sscr1);
if (value) {
pxa_ssp_write_reg(ssp, SSSR, sssr);
pxa_ssp_write_reg(ssp, SSPSP, sspsp);
pxa_ssp_write_reg(ssp, SSCR0, sscr0 | SSCR0_SSE);
}
}
static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *cpu_dai)
{
int ret = 0;
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
int val;
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
pxa_ssp_enable(ssp);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
pxa_ssp_set_running_bit(substream, ssp, 1);
val = pxa_ssp_read_reg(ssp, SSSR);
pxa_ssp_write_reg(ssp, SSSR, val);
break;
case SNDRV_PCM_TRIGGER_START:
pxa_ssp_set_running_bit(substream, ssp, 1);
break;
case SNDRV_PCM_TRIGGER_STOP:
pxa_ssp_set_running_bit(substream, ssp, 0);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
pxa_ssp_disable(ssp);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
pxa_ssp_set_running_bit(substream, ssp, 0);
break;
default:
ret = -EINVAL;
}
dump_registers(ssp);
return ret;
}
static int pxa_ssp_probe(struct snd_soc_dai *dai)
{
struct ssp_priv *priv;
int ret;
priv = kzalloc(sizeof(struct ssp_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio");
if (priv->ssp == NULL) {
ret = -ENODEV;
goto err_priv;
}
priv->dai_fmt = (unsigned int) -1;
snd_soc_dai_set_drvdata(dai, priv);
return 0;
err_priv:
kfree(priv);
return ret;
}
static int pxa_ssp_remove(struct snd_soc_dai *dai)
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(dai);
pxa_ssp_free(priv->ssp);
kfree(priv);
return 0;
}
#define PXA_SSP_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
.startup = pxa_ssp_startup,
.shutdown = pxa_ssp_shutdown,
.trigger = pxa_ssp_trigger,
.hw_params = pxa_ssp_hw_params,
.set_sysclk = pxa_ssp_set_dai_sysclk,
.set_clkdiv = pxa_ssp_set_dai_clkdiv,
.set_pll = pxa_ssp_set_dai_pll,
.set_fmt = pxa_ssp_set_dai_fmt,
.set_tdm_slot = pxa_ssp_set_dai_tdm_slot,
.set_tristate = pxa_ssp_set_dai_tristate,
};
static struct snd_soc_dai_driver pxa_ssp_dai = {
.probe = pxa_ssp_probe,
.remove = pxa_ssp_remove,
.suspend = pxa_ssp_suspend,
.resume = pxa_ssp_resume,
.playback = {
.channels_min = 1,
.channels_max = 8,
.rates = PXA_SSP_RATES,
.formats = PXA_SSP_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 8,
.rates = PXA_SSP_RATES,
.formats = PXA_SSP_FORMATS,
},
.ops = &pxa_ssp_dai_ops,
};
static __devinit int asoc_ssp_probe(struct platform_device *pdev)
{
return snd_soc_register_dai(&pdev->dev, &pxa_ssp_dai);
}
static int __devexit asoc_ssp_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
static struct platform_driver asoc_ssp_driver = {
.driver = {
.name = "pxa-ssp-dai",
.owner = THIS_MODULE,
},
.probe = asoc_ssp_probe,
.remove = __devexit_p(asoc_ssp_remove),
};
module_platform_driver(asoc_ssp_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TheTypoMaster/kernel_condor | drivers/staging/iio/light/tsl2563.c | 4875 | 21219 | /*
* drivers/i2c/chips/tsl2563.c
*
* Copyright (C) 2008 Nokia Corporation
*
* Written by Timo O. Karjalainen <timo.o.karjalainen@nokia.com>
* Contact: Amit Kucheria <amit.kucheria@verdurent.com>
*
* Converted to IIO driver
* Amit Kucheria <amit.kucheria@verdurent.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../events.h"
#include "tsl2563.h"
/* Use this many bits for fraction part. */
#define ADC_FRAC_BITS (14)
/* Given number of 1/10000's in ADC_FRAC_BITS precision. */
#define FRAC10K(f) (((f) * (1L << (ADC_FRAC_BITS))) / (10000))
/* Bits used for fraction in calibration coefficients.*/
#define CALIB_FRAC_BITS (10)
/* 0.5 in CALIB_FRAC_BITS precision */
#define CALIB_FRAC_HALF (1 << (CALIB_FRAC_BITS - 1))
/* Make a fraction from a number n that was multiplied with b. */
#define CALIB_FRAC(n, b) (((n) << CALIB_FRAC_BITS) / (b))
/* Decimal 10^(digits in sysfs presentation) */
#define CALIB_BASE_SYSFS (1000)
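/*
* Fixed-point examples (illustrative, not from the original driver):
* FRAC10K(1300) = 1300 * 2^14 / 10000 = 2129, i.e. ~0.13 in Q14, and
* calib_from_sysfs(1000) = (1000 << CALIB_FRAC_BITS) / 1000 = 1024,
* i.e. a calibration gain of exactly 1.0 in Q10.
*/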
#define TSL2563_CMD (0x80)
#define TSL2563_CLEARINT (0x40)
#define TSL2563_REG_CTRL (0x00)
#define TSL2563_REG_TIMING (0x01)
#define TSL2563_REG_LOWLOW (0x02) /* data0 low threshold, 2 bytes */
#define TSL2563_REG_LOWHIGH (0x03)
#define TSL2563_REG_HIGHLOW (0x04) /* data0 high threshold, 2 bytes */
#define TSL2563_REG_HIGHHIGH (0x05)
#define TSL2563_REG_INT (0x06)
#define TSL2563_REG_ID (0x0a)
#define TSL2563_REG_DATA0LOW (0x0c) /* broadband sensor value, 2 bytes */
#define TSL2563_REG_DATA0HIGH (0x0d)
#define TSL2563_REG_DATA1LOW (0x0e) /* infrared sensor value, 2 bytes */
#define TSL2563_REG_DATA1HIGH (0x0f)
#define TSL2563_CMD_POWER_ON (0x03)
#define TSL2563_CMD_POWER_OFF (0x00)
#define TSL2563_CTRL_POWER_MASK (0x03)
#define TSL2563_TIMING_13MS (0x00)
#define TSL2563_TIMING_100MS (0x01)
#define TSL2563_TIMING_400MS (0x02)
#define TSL2563_TIMING_MASK (0x03)
#define TSL2563_TIMING_GAIN16 (0x10)
#define TSL2563_TIMING_GAIN1 (0x00)
#define TSL2563_INT_DISBLED (0x00)
#define TSL2563_INT_LEVEL (0x10)
#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
struct tsl2563_gainlevel_coeff {
u8 gaintime;
u16 min;
u16 max;
};
static const struct tsl2563_gainlevel_coeff tsl2563_gainlevel_table[] = {
{
.gaintime = TSL2563_TIMING_400MS | TSL2563_TIMING_GAIN16,
.min = 0,
.max = 65534,
}, {
.gaintime = TSL2563_TIMING_400MS | TSL2563_TIMING_GAIN1,
.min = 2048,
.max = 65534,
}, {
.gaintime = TSL2563_TIMING_100MS | TSL2563_TIMING_GAIN1,
.min = 4095,
.max = 37177,
}, {
.gaintime = TSL2563_TIMING_13MS | TSL2563_TIMING_GAIN1,
.min = 3000,
.max = 65535,
},
};
struct tsl2563_chip {
struct mutex lock;
struct i2c_client *client;
struct delayed_work poweroff_work;
/* Remember state for suspend and resume functions */
bool suspended;
struct tsl2563_gainlevel_coeff const *gainlevel;
u16 low_thres;
u16 high_thres;
u8 intr;
bool int_enabled;
/* Calibration coefficients */
u32 calib0;
u32 calib1;
int cover_comp_gain;
/* Cache current values, to be returned while suspended */
u32 data0;
u32 data1;
};
static int tsl2563_set_power(struct tsl2563_chip *chip, int on)
{
struct i2c_client *client = chip->client;
u8 cmd;
cmd = on ? TSL2563_CMD_POWER_ON : TSL2563_CMD_POWER_OFF;
return i2c_smbus_write_byte_data(client,
TSL2563_CMD | TSL2563_REG_CTRL, cmd);
}
/*
* Return value is 0 for off, 1 for on, or a negative error
* code if reading failed.
*/
static int tsl2563_get_power(struct tsl2563_chip *chip)
{
struct i2c_client *client = chip->client;
int ret;
ret = i2c_smbus_read_byte_data(client, TSL2563_CMD | TSL2563_REG_CTRL);
if (ret < 0)
return ret;
return (ret & TSL2563_CTRL_POWER_MASK) == TSL2563_CMD_POWER_ON;
}
static int tsl2563_configure(struct tsl2563_chip *chip)
{
int ret;
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_TIMING,
chip->gainlevel->gaintime);
if (ret)
goto error_ret;
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_HIGHLOW,
chip->high_thres & 0xFF);
if (ret)
goto error_ret;
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_HIGHHIGH,
(chip->high_thres >> 8) & 0xFF);
if (ret)
goto error_ret;
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_LOWLOW,
chip->low_thres & 0xFF);
if (ret)
goto error_ret;
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_LOWHIGH,
(chip->low_thres >> 8) & 0xFF);
/* The interrupt register is written automatically whenever it is
relevant, so it is not written here */
error_ret:
return ret;
}
static void tsl2563_poweroff_work(struct work_struct *work)
{
struct tsl2563_chip *chip =
container_of(work, struct tsl2563_chip, poweroff_work.work);
tsl2563_set_power(chip, 0);
}
static int tsl2563_detect(struct tsl2563_chip *chip)
{
int ret;
ret = tsl2563_set_power(chip, 1);
if (ret)
return ret;
ret = tsl2563_get_power(chip);
if (ret < 0)
return ret;
return ret ? 0 : -ENODEV;
}
static int tsl2563_read_id(struct tsl2563_chip *chip, u8 *id)
{
struct i2c_client *client = chip->client;
int ret;
ret = i2c_smbus_read_byte_data(client, TSL2563_CMD | TSL2563_REG_ID);
if (ret < 0)
return ret;
*id = ret;
return 0;
}
/*
* "Normalized" ADC value is one obtained with 400ms of integration time and
* 16x gain. This function returns the number of bits of shift needed to
* convert between normalized values and HW values obtained using given
* timing and gain settings.
*/
static int adc_shiftbits(u8 timing)
{
int shift = 0;
switch (timing & TSL2563_TIMING_MASK) {
case TSL2563_TIMING_13MS:
shift += 5;
break;
case TSL2563_TIMING_100MS:
shift += 2;
break;
case TSL2563_TIMING_400MS:
/* no-op */
break;
}
if (!(timing & TSL2563_TIMING_GAIN16))
shift += 4;
return shift;
}
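/*
* Example (illustrative): with 13ms integration and 1x gain the shift is
* 5 + 4 = 9, so a raw ADC count of 100 normalizes to 100 << 9 = 51200,
* the value it would have had at 400ms / 16x.
*/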
/* Convert a HW ADC value to normalized scale. */
static u32 normalize_adc(u16 adc, u8 timing)
{
return adc << adc_shiftbits(timing);
}
static void tsl2563_wait_adc(struct tsl2563_chip *chip)
{
unsigned int delay;
switch (chip->gainlevel->gaintime & TSL2563_TIMING_MASK) {
case TSL2563_TIMING_13MS:
delay = 14;
break;
case TSL2563_TIMING_100MS:
delay = 101;
break;
default:
delay = 402;
}
/*
* TODO: Make sure that we wait at least the required delay, but why do
* we have to extend it by one more tick?
*/
schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
}
static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
{
struct i2c_client *client = chip->client;
if (adc > chip->gainlevel->max || adc < chip->gainlevel->min) {
(adc > chip->gainlevel->max) ?
chip->gainlevel++ : chip->gainlevel--;
i2c_smbus_write_byte_data(client,
TSL2563_CMD | TSL2563_REG_TIMING,
chip->gainlevel->gaintime);
tsl2563_wait_adc(chip);
tsl2563_wait_adc(chip);
return 1;
} else
return 0;
}
static int tsl2563_get_adc(struct tsl2563_chip *chip)
{
struct i2c_client *client = chip->client;
u16 adc0, adc1;
int retry = 1;
int ret = 0;
if (chip->suspended)
goto out;
if (!chip->int_enabled) {
cancel_delayed_work(&chip->poweroff_work);
if (!tsl2563_get_power(chip)) {
ret = tsl2563_set_power(chip, 1);
if (ret)
goto out;
ret = tsl2563_configure(chip);
if (ret)
goto out;
tsl2563_wait_adc(chip);
}
}
while (retry) {
ret = i2c_smbus_read_word_data(client,
TSL2563_CMD | TSL2563_REG_DATA0LOW);
if (ret < 0)
goto out;
adc0 = ret;
ret = i2c_smbus_read_word_data(client,
TSL2563_CMD | TSL2563_REG_DATA1LOW);
if (ret < 0)
goto out;
adc1 = ret;
retry = tsl2563_adjust_gainlevel(chip, adc0);
}
chip->data0 = normalize_adc(adc0, chip->gainlevel->gaintime);
chip->data1 = normalize_adc(adc1, chip->gainlevel->gaintime);
if (!chip->int_enabled)
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
ret = 0;
out:
return ret;
}
static inline int calib_to_sysfs(u32 calib)
{
return (int) (((calib * CALIB_BASE_SYSFS) +
CALIB_FRAC_HALF) >> CALIB_FRAC_BITS);
}
static inline u32 calib_from_sysfs(int value)
{
return (((u32) value) << CALIB_FRAC_BITS) / CALIB_BASE_SYSFS;
}
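/*
 * Example (illustrative only): calib_from_sysfs(CALIB_BASE_SYSFS) is
 * (CALIB_BASE_SYSFS << CALIB_FRAC_BITS) / CALIB_BASE_SYSFS, i.e.
 * 1 << CALIB_FRAC_BITS -- unity gain in fixed point -- which is what
 * probe installs as the default calibration for both channels.
 */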
/*
* Conversions between lux and ADC values.
*
* The basic formula is lux = c0 * adc0 - c1 * adc1, where c0 and c1 are
* appropriate constants. Different constants are needed for different
* kinds of light, determined by the ratio adc1/adc0 (basically the ratio
* of the intensities in infrared and visible wavelengths). lux_table below
* lists the upper threshold of the adc1/adc0 ratio and the corresponding
* constants.
*/
struct tsl2563_lux_coeff {
unsigned long ch_ratio;
unsigned long ch0_coeff;
unsigned long ch1_coeff;
};
static const struct tsl2563_lux_coeff lux_table[] = {
{
.ch_ratio = FRAC10K(1300),
.ch0_coeff = FRAC10K(315),
.ch1_coeff = FRAC10K(262),
}, {
.ch_ratio = FRAC10K(2600),
.ch0_coeff = FRAC10K(337),
.ch1_coeff = FRAC10K(430),
}, {
.ch_ratio = FRAC10K(3900),
.ch0_coeff = FRAC10K(363),
.ch1_coeff = FRAC10K(529),
}, {
.ch_ratio = FRAC10K(5200),
.ch0_coeff = FRAC10K(392),
.ch1_coeff = FRAC10K(605),
}, {
.ch_ratio = FRAC10K(6500),
.ch0_coeff = FRAC10K(229),
.ch1_coeff = FRAC10K(291),
}, {
.ch_ratio = FRAC10K(8000),
.ch0_coeff = FRAC10K(157),
.ch1_coeff = FRAC10K(180),
}, {
.ch_ratio = FRAC10K(13000),
.ch0_coeff = FRAC10K(34),
.ch1_coeff = FRAC10K(26),
}, {
.ch_ratio = ULONG_MAX,
.ch0_coeff = 0,
.ch1_coeff = 0,
},
};
/*
* Convert normalized, scaled ADC values to lux.
*/
static unsigned int adc_to_lux(u32 adc0, u32 adc1)
{
const struct tsl2563_lux_coeff *lp = lux_table;
unsigned long ratio, lux, ch0 = adc0, ch1 = adc1;
ratio = ch0 ? ((ch1 << ADC_FRAC_BITS) / ch0) : ULONG_MAX;
while (lp->ch_ratio < ratio)
lp++;
lux = ch0 * lp->ch0_coeff - ch1 * lp->ch1_coeff;
return (unsigned int) (lux >> ADC_FRAC_BITS);
}
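/*
 * Worked example (illustrative, assuming FRAC10K(x) encodes x/10000 in
 * ADC_FRAC_BITS fixed point): with adc0 = 1000 and adc1 = 200 the ratio
 * is 0.2 = FRAC10K(2000), so the second table row (coefficients 337 and
 * 430) is selected and the result is roughly
 * 1000 * 0.0337 - 200 * 0.0430 ~= 25 lux.
 */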
/*--------------------------------------------------------------*/
/* Sysfs interface */
/*--------------------------------------------------------------*/
/* Apply calibration coefficient to ADC count. */
static u32 calib_adc(u32 adc, u32 calib)
{
unsigned long scaled = adc;
scaled *= calib;
scaled >>= CALIB_FRAC_BITS;
return (u32) scaled;
}
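/*
 * Example (illustrative only): with calib == 1 << CALIB_FRAC_BITS the
 * count is returned unchanged; doubling the calibration coefficient
 * doubles the count before it is handed to adc_to_lux().
 */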
static int tsl2563_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
if (chan->channel == 0)
chip->calib0 = calib_from_sysfs(val);
else
chip->calib1 = calib_from_sysfs(val);
return 0;
}
static int tsl2563_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
int ret = -EINVAL;
u32 calib0, calib1;
struct tsl2563_chip *chip = iio_priv(indio_dev);
mutex_lock(&chip->lock);
switch (m) {
case 0:
switch (chan->type) {
case IIO_LIGHT:
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
calib0 = calib_adc(chip->data0, chip->calib0) *
chip->cover_comp_gain;
calib1 = calib_adc(chip->data1, chip->calib1) *
chip->cover_comp_gain;
*val = adc_to_lux(calib0, calib1);
ret = IIO_VAL_INT;
break;
case IIO_INTENSITY:
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
if (chan->channel == 0)
*val = chip->data0;
else
*val = chip->data1;
ret = IIO_VAL_INT;
break;
default:
break;
}
break;
case IIO_CHAN_INFO_CALIBSCALE:
if (chan->channel == 0)
*val = calib_to_sysfs(chip->calib0);
else
*val = calib_to_sysfs(chip->calib1);
ret = IIO_VAL_INT;
break;
default:
ret = -EINVAL;
goto error_ret;
}
error_ret:
mutex_unlock(&chip->lock);
return ret;
}
static const struct iio_chan_spec tsl2563_channels[] = {
{
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
}, {
.type = IIO_INTENSITY,
.modified = 1,
.channel2 = IIO_MOD_LIGHT_BOTH,
.info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
.event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING)),
}, {
.type = IIO_INTENSITY,
.modified = 1,
.channel2 = IIO_MOD_LIGHT_IR,
.info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
}
};
static int tsl2563_read_thresh(struct iio_dev *indio_dev,
u64 event_code,
int *val)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
case IIO_EV_DIR_RISING:
*val = chip->high_thres;
break;
case IIO_EV_DIR_FALLING:
*val = chip->low_thres;
break;
default:
return -EINVAL;
}
return 0;
}
static int tsl2563_write_thresh(struct iio_dev *indio_dev,
u64 event_code,
int val)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
u8 address;
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_RISING)
address = TSL2563_REG_HIGHLOW;
else
address = TSL2563_REG_LOWLOW;
mutex_lock(&chip->lock);
ret = i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | address,
val & 0xFF);
if (ret)
goto error_ret;
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | (address + 1),
(val >> 8) & 0xFF);
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_RISING)
chip->high_thres = val;
else
chip->low_thres = val;
error_ret:
mutex_unlock(&chip->lock);
return ret;
}
static irqreturn_t tsl2563_event_handler(int irq, void *private)
{
struct iio_dev *dev_info = private;
struct tsl2563_chip *chip = iio_priv(dev_info);
iio_push_event(dev_info,
IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
iio_get_time_ns());
/* the event has been pushed above; now clear the interrupt */
i2c_smbus_write_byte(chip->client, TSL2563_CMD | TSL2563_CLEARINT);
return IRQ_HANDLED;
}
static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
u64 event_code,
int state)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret = 0;
mutex_lock(&chip->lock);
if (state && !(chip->intr & 0x30)) {
chip->intr &= ~0x30;
chip->intr |= 0x10;
/* ensure the chip is actually on */
cancel_delayed_work(&chip->poweroff_work);
if (!tsl2563_get_power(chip)) {
ret = tsl2563_set_power(chip, 1);
if (ret)
goto out;
ret = tsl2563_configure(chip);
if (ret)
goto out;
}
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_INT,
chip->intr);
chip->int_enabled = true;
}
if (!state && (chip->intr & 0x30)) {
/* clear the interrupt-enable bits (INTR field) to disable */
chip->intr &= ~0x30;
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_INT,
chip->intr);
chip->int_enabled = false;
/* now the interrupt is not enabled, we can go to sleep */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
}
out:
mutex_unlock(&chip->lock);
return ret;
}
static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
u64 event_code)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
mutex_lock(&chip->lock);
ret = i2c_smbus_read_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_INT);
mutex_unlock(&chip->lock);
if (ret < 0)
goto error_ret;
ret = !!(ret & 0x30);
error_ret:
return ret;
}
/*--------------------------------------------------------------*/
/* Probe, Attach, Remove */
/*--------------------------------------------------------------*/
static struct i2c_driver tsl2563_i2c_driver;
static const struct iio_info tsl2563_info_no_irq = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
};
static const struct iio_info tsl2563_info = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
.read_event_value = &tsl2563_read_thresh,
.write_event_value = &tsl2563_write_thresh,
.read_event_config = &tsl2563_read_interrupt_config,
.write_event_config = &tsl2563_write_interrupt_config,
};
static int __devinit tsl2563_probe(struct i2c_client *client,
const struct i2c_device_id *device_id)
{
struct iio_dev *indio_dev;
struct tsl2563_chip *chip;
struct tsl2563_platform_data *pdata = client->dev.platform_data;
int err = 0;
u8 id = 0;
indio_dev = iio_allocate_device(sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, chip);
chip->client = client;
err = tsl2563_detect(chip);
if (err) {
dev_err(&client->dev, "detect error %d\n", -err);
goto fail1;
}
err = tsl2563_read_id(chip, &id);
if (err) {
dev_err(&client->dev, "read id error %d\n", -err);
goto fail1;
}
mutex_init(&chip->lock);
/* Default values used until userspace says otherwise */
chip->low_thres = 0x0;
chip->high_thres = 0xffff;
chip->gainlevel = tsl2563_gainlevel_table;
chip->intr = TSL2563_INT_PERSIST(4);
chip->calib0 = calib_from_sysfs(CALIB_BASE_SYSFS);
chip->calib1 = calib_from_sysfs(CALIB_BASE_SYSFS);
if (pdata)
chip->cover_comp_gain = pdata->cover_comp_gain;
else
chip->cover_comp_gain = 1;
dev_info(&client->dev, "model %d, rev. %d\n", id >> 4, id & 0x0f);
indio_dev->name = client->name;
indio_dev->channels = tsl2563_channels;
indio_dev->num_channels = ARRAY_SIZE(tsl2563_channels);
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
if (client->irq)
indio_dev->info = &tsl2563_info;
else
indio_dev->info = &tsl2563_info_no_irq;
if (client->irq) {
err = request_threaded_irq(client->irq,
NULL,
&tsl2563_event_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"tsl2563_event",
indio_dev);
if (err) {
dev_err(&client->dev, "irq request error %d\n", -err);
goto fail1;
}
}
err = tsl2563_configure(chip);
if (err) {
dev_err(&client->dev, "configure error %d\n", -err);
goto fail2;
}
INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
/* The interrupt cannot yet be enabled so this is fine without lock */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
err = iio_device_register(indio_dev);
if (err) {
dev_err(&client->dev, "iio registration error %d\n", -err);
goto fail3;
}
return 0;
fail3:
cancel_delayed_work(&chip->poweroff_work);
flush_scheduled_work();
fail2:
if (client->irq)
free_irq(client->irq, indio_dev);
fail1:
iio_free_device(indio_dev);
return err;
}
static int tsl2563_remove(struct i2c_client *client)
{
struct tsl2563_chip *chip = i2c_get_clientdata(client);
struct iio_dev *indio_dev = iio_priv_to_dev(chip);
iio_device_unregister(indio_dev);
if (!chip->int_enabled)
cancel_delayed_work(&chip->poweroff_work);
/* Ensure that interrupts are disabled - then flush any bottom halves */
chip->intr &= ~0x30;
i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | TSL2563_REG_INT,
chip->intr);
flush_scheduled_work();
tsl2563_set_power(chip, 0);
if (client->irq)
free_irq(client->irq, indio_dev);
iio_free_device(indio_dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tsl2563_suspend(struct device *dev)
{
struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
int ret;
mutex_lock(&chip->lock);
ret = tsl2563_set_power(chip, 0);
if (ret)
goto out;
chip->suspended = true;
out:
mutex_unlock(&chip->lock);
return ret;
}
static int tsl2563_resume(struct device *dev)
{
struct tsl2563_chip *chip = i2c_get_clientdata(to_i2c_client(dev));
int ret;
mutex_lock(&chip->lock);
ret = tsl2563_set_power(chip, 1);
if (ret)
goto out;
ret = tsl2563_configure(chip);
if (ret)
goto out;
chip->suspended = false;
out:
mutex_unlock(&chip->lock);
return ret;
}
static SIMPLE_DEV_PM_OPS(tsl2563_pm_ops, tsl2563_suspend, tsl2563_resume);
#define TSL2563_PM_OPS (&tsl2563_pm_ops)
#else
#define TSL2563_PM_OPS NULL
#endif
static const struct i2c_device_id tsl2563_id[] = {
{ "tsl2560", 0 },
{ "tsl2561", 1 },
{ "tsl2562", 2 },
{ "tsl2563", 3 },
{}
};
MODULE_DEVICE_TABLE(i2c, tsl2563_id);
static struct i2c_driver tsl2563_i2c_driver = {
.driver = {
.name = "tsl2563",
.pm = TSL2563_PM_OPS,
},
.probe = tsl2563_probe,
.remove = __devexit_p(tsl2563_remove),
.id_table = tsl2563_id,
};
module_i2c_driver(tsl2563_i2c_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("tsl2563 light sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
delapuente/codeaurora_kernel_msm | drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 5387 | 30270 | /*
* Copyright 2011 (c) Oracle Corp.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
*/
/*
* A simple DMA pool loosely based on dmapool.c. It has certain advantages
* over the DMA pools:
* - Pool collects recently freed pages for reuse (and hooks up to
* the shrinker).
* - Tracks currently in use pages
* - Tracks whether the page is UC, WB or cached (and reverts to WB
* when freed).
*/
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
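/* e.g. 4096 / 8 = 512 pages per batch on a 64-bit box with 4 KiB pages */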
#define SMALL_ALLOCATION 4
#define FREE_ALL_PAGES (~0U)
/* times are in msecs */
#define IS_UNDEFINED (0)
#define IS_WC (1<<1)
#define IS_UC (1<<2)
#define IS_CACHED (1<<3)
#define IS_DMA32 (1<<4)
enum pool_type {
POOL_IS_UNDEFINED,
POOL_IS_WC = IS_WC,
POOL_IS_UC = IS_UC,
POOL_IS_CACHED = IS_CACHED,
POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
* The pool structure. There are usually six pools:
* - generic (not restricted to DMA32):
* - write combined, uncached, cached.
* - dma32 (up to 2^32 - so up 4GB):
* - write combined, uncached, cached.
* for each 'struct device'. The 'cached' is for pages that are actively used.
* The other ones can be shrunk by the shrinker API if necessary.
* @pools: The 'struct device->dma_pools' link.
* @type: Type of the pool
* @lock: Protects the inuse_list and free_list from concurrent access. Must be
* used with irqsave/irqrestore variants because the pool allocator may be called
* from delayed work.
* @inuse_list: Pool of pages that are in use. The order is very important and
* it is in the order that the TTM pages that are put back are in.
* @free_list: Pool of pages that are free to be used. No order requirements.
* @dev: The device that is associated with these pools.
* @size: Size used during DMA allocation.
* @npages_free: Count of available pages for re-use.
* @npages_in_use: Count of pages that are in use.
* @nfrees: Stats when pool is shrinking.
* @nrefills: Stats when the pool is grown.
* @gfp_flags: Flags to pass for alloc_page.
* @name: Name of the pool.
* @dev_name: Name derived from dev - similar to how dev_info works.
* Used during shutdown as the dev_info during release is unavailable.
*/
struct dma_pool {
struct list_head pools; /* The 'struct device->dma_pools' link */
enum pool_type type;
spinlock_t lock;
struct list_head inuse_list;
struct list_head free_list;
struct device *dev;
unsigned size;
unsigned npages_free;
unsigned npages_in_use;
unsigned long nfrees; /* Stats when shrunk. */
unsigned long nrefills; /* Stats when grown. */
gfp_t gfp_flags;
char name[13]; /* "cached dma32" */
char dev_name[64]; /* Constructed from dev */
};
/*
* The accounting page keeping track of the allocated page along with
* the DMA address.
* @page_list: The link to the 'page_list' in 'struct dma_pool'.
* @vaddr: The virtual address of the page
* @dma: The bus address of the page. If the page is not allocated
* via the DMA API, it will be -1.
*/
struct dma_page {
struct list_head page_list;
void *vaddr;
struct page *p;
dma_addr_t dma;
};
/*
* Limits for the pool. They are handled without locks because the only place
* where they may change is the sysfs store. They won't have an immediate
* effect anyway, so forcing serialized access to them is pointless.
*/
struct ttm_pool_opts {
unsigned alloc_size;
unsigned max_size;
unsigned small;
};
/*
* Contains the list of all of the 'struct device' and their corresponding
* DMA pools. Guarded by _mutex->lock.
* @pools: The link to 'struct ttm_pool_manager->pools'
* @dev: The 'struct device' associated with the 'pool'
* @pool: The 'struct dma_pool' associated with the 'dev'
*/
struct device_pools {
struct list_head pools;
struct device *dev;
struct dma_pool *pool;
};
/*
* struct ttm_pool_manager - Holds memory pools for fast allocation
*
* @lock: Lock used when adding/removing from pools
* @pools: List of 'struct device' and 'struct dma_pool' tuples.
* @options: Limits for the pool.
* @npools: Total amount of pools in existence.
* @shrinker: The structure used by [un|]register_shrinker
*/
struct ttm_pool_manager {
struct mutex lock;
struct list_head pools;
struct ttm_pool_opts options;
unsigned npools;
struct shrinker mm_shrink;
struct kobject kobj;
};
static struct ttm_pool_manager *_manager;
static struct attribute ttm_page_pool_max = {
.name = "pool_max_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
.name = "pool_small_allocation",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
.name = "pool_allocation_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute *ttm_pool_attrs[] = {
&ttm_page_pool_max,
&ttm_page_pool_small,
&ttm_page_pool_alloc_size,
NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buffer, size_t size)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
int chars;
unsigned val;
chars = sscanf(buffer, "%u", &val);
if (chars == 0)
return size;
/* Convert kb to number of pages */
val = val / (PAGE_SIZE >> 10);
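/*
 * Example (illustrative only): with 4 KiB pages, writing "1024" (KiB)
 * to one of the pool_* attributes stores 1024 / 4 = 256 pages as the
 * new limit.
 */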
if (attr == &ttm_page_pool_max)
m->options.max_size = val;
else if (attr == &ttm_page_pool_small)
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
pr_warn("Setting allocation size to larger than %lu is not recommended\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
return size;
}
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
unsigned val = 0;
if (attr == &ttm_page_pool_max)
val = m->options.max_size;
else if (attr == &ttm_page_pool_small)
val = m->options.small;
else if (attr == &ttm_page_pool_alloc_size)
val = m->options.alloc_size;
val = val * (PAGE_SIZE >> 10);
return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
.show = &ttm_pool_show,
.store = &ttm_pool_store,
};
static struct kobj_type ttm_pool_kobj_type = {
.release = &ttm_pool_kobj_release,
.sysfs_ops = &ttm_pool_sysfs_ops,
.default_attrs = ttm_pool_attrs,
};
#ifndef CONFIG_X86
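/*
 * Non-x86 fallbacks (sketch of the intent, not new behaviour): without
 * the x86 set_pages_array_*() helpers the pages are simply mapped into
 * or out of the AGP aperture when TTM_HAS_AGP is available, and are
 * otherwise left untouched.
 */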
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
for (i = 0; i < addrinarray; i++)
unmap_page_from_agp(pages[i]);
#endif
return 0;
}
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
#endif /* for !CONFIG_X86 */
static int ttm_set_pages_caching(struct dma_pool *pool,
struct page **pages, unsigned cpages)
{
int r = 0;
/* Set page caching */
if (pool->type & IS_UC) {
r = set_pages_array_uc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages);
}
if (pool->type & IS_WC) {
r = set_pages_array_wc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages);
}
return r;
}
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
dma_addr_t dma = d_page->dma;
dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
kfree(d_page);
d_page = NULL;
}
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
struct dma_page *d_page;
d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
if (!d_page)
return NULL;
d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
&d_page->dma,
pool->gfp_flags);
if (d_page->vaddr)
d_page->p = virt_to_page(d_page->vaddr);
else {
kfree(d_page);
d_page = NULL;
}
return d_page;
}
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
enum pool_type type = IS_UNDEFINED;
if (flags & TTM_PAGE_FLAG_DMA32)
type |= IS_DMA32;
if (cstate == tt_cached)
type |= IS_CACHED;
else if (cstate == tt_uncached)
type |= IS_UC;
else
type |= IS_WC;
return type;
}
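/*
 * Example (illustrative only): a ttm_tt with TTM_PAGE_FLAG_DMA32 and
 * tt_uncached caching maps to IS_UC | IS_DMA32, i.e. POOL_IS_UC_DMA32,
 * so the DMA32 uncached pool is selected for it.
 */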
static void ttm_pool_update_free_locked(struct dma_pool *pool,
unsigned freed_pages)
{
pool->npages_free -= freed_pages;
pool->nfrees += freed_pages;
}
/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
struct page *pages[], unsigned npages)
{
struct dma_page *d_page, *tmp;
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
set_pages_array_wb(pages, npages))
pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, npages);
list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page);
}
}
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
/* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, 1);
list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page);
}
/*
* Free pages from pool.
*
* To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
* pages in one go.
*
* @pool: to free the pages from
* @nr_free: number of pages to free; FREE_ALL_PAGES frees everything in the pool
**/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
struct page **pages_to_free;
struct list_head d_pages;
unsigned freed_pages = 0,
npages_to_free = nr_free;
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
if (nr_free > 1) {
pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
pool->dev_name, pool->name, current->pid,
npages_to_free, nr_free);
}
#endif
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
pool->dev_name);
return 0;
}
INIT_LIST_HEAD(&d_pages);
restart:
spin_lock_irqsave(&pool->lock, irq_flags);
/* We are picking the oldest ones off the list */
list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
page_list) {
if (freed_pages >= npages_to_free)
break;
/* Move the dma_page from one list to another. */
list_move(&dma_p->page_list, &d_pages);
pages_to_free[freed_pages++] = dma_p->p;
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
ttm_pool_update_free_locked(pool, freed_pages);
/**
* Because changing page caching is costly
* we unlock the pool to prevent stalling.
*/
spin_unlock_irqrestore(&pool->lock, irq_flags);
ttm_dma_pages_put(pool, &d_pages, pages_to_free,
freed_pages);
INIT_LIST_HEAD(&d_pages);
if (likely(nr_free != FREE_ALL_PAGES))
nr_free -= freed_pages;
if (NUM_PAGES_TO_ALLOC >= nr_free)
npages_to_free = nr_free;
else
npages_to_free = NUM_PAGES_TO_ALLOC;
freed_pages = 0;
/* free all so restart the processing */
if (nr_free)
goto restart;
/* Not allowed to fall through or break because
* following context is inside spinlock while we are
* outside here.
*/
goto out;
}
}
/* remove range of pages from the pool */
if (freed_pages) {
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (freed_pages)
ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
kfree(pages_to_free);
return nr_free;
}
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
struct device_pools *p;
struct dma_pool *pool;
if (!dev)
return;
mutex_lock(&_manager->lock);
list_for_each_entry_reverse(p, &_manager->pools, pools) {
if (p->dev != dev)
continue;
pool = p->pool;
if (pool->type != type)
continue;
list_del(&p->pools);
kfree(p);
_manager->npools--;
break;
}
list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
if (pool->type != type)
continue;
/* Takes a spinlock.. */
ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device have been dropped - so nobody should be
* touching it. In case somebody is trying to _add_ we are
* guarded by the mutex. */
list_del(&pool->pools);
kfree(pool);
break;
}
mutex_unlock(&_manager->lock);
}
/*
* This destructor is run when the 'struct device' is freed,
* although the pool might have already been freed earlier.
*/
static void ttm_dma_pool_release(struct device *dev, void *res)
{
struct dma_pool *pool = *(struct dma_pool **)res;
if (pool)
ttm_dma_free_pool(dev, pool->type);
}
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
return *(struct dma_pool **)res == match_data;
}
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
enum pool_type type)
{
char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
struct device_pools *sec_pool = NULL;
struct dma_pool *pool = NULL, **ptr;
unsigned i;
int ret = -ENODEV;
char *p;
if (!dev)
return NULL;
ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
ret = -ENOMEM;
pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
dev_to_node(dev));
if (!pool)
goto err_mem;
sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
dev_to_node(dev));
if (!sec_pool)
goto err_mem;
INIT_LIST_HEAD(&sec_pool->pools);
sec_pool->dev = dev;
sec_pool->pool = pool;
INIT_LIST_HEAD(&pool->free_list);
INIT_LIST_HEAD(&pool->inuse_list);
INIT_LIST_HEAD(&pool->pools);
spin_lock_init(&pool->lock);
pool->dev = dev;
pool->npages_free = pool->npages_in_use = 0;
pool->nfrees = 0;
pool->gfp_flags = flags;
pool->size = PAGE_SIZE;
pool->type = type;
pool->nrefills = 0;
p = pool->name;
for (i = 0; i < 5; i++) {
if (type & t[i]) {
p += snprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
}
}
*p = 0;
/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
* - the kobj->name has already been deallocated.*/
snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
dev_driver_string(dev), dev_name(dev));
mutex_lock(&_manager->lock);
/* You can get the dma_pool from either the global: */
list_add(&sec_pool->pools, &_manager->pools);
_manager->npools++;
/* or from 'struct device': */
list_add(&pool->pools, &dev->dma_pools);
mutex_unlock(&_manager->lock);
*ptr = pool;
devres_add(dev, ptr);
return pool;
err_mem:
devres_free(ptr);
kfree(sec_pool);
kfree(pool);
return ERR_PTR(ret);
}
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
enum pool_type type)
{
struct dma_pool *pool, *tmp, *found = NULL;
if (type == IS_UNDEFINED)
return found;
/* NB: We iterate on the 'struct dev' which has no spinlock, but
* it does have a kref which we have taken. The kref is taken during
* graphic driver loading - in the drm_pci_init it calls either
* pci_dev_get or pci_register_driver which both end up taking a kref
* on 'struct device'.
*
* On teardown, the graphic drivers end up quiescing the TTM (put_pages)
* and call the dev_res destructors: ttm_dma_pool_release. The nice
* thing is that at that point there are no pages associated with the
* driver so this function will not be called.
*/
list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
if (pool->type != type)
continue;
found = pool;
break;
}
return found;
}
/*
* Free the pages that failed to change the caching state. If there
* are pages that have already changed their caching state, put them
* back to the pool.
*/
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
struct list_head *d_pages,
struct page **failed_pages,
unsigned cpages)
{
struct dma_page *d_page, *tmp;
struct page *p;
unsigned i = 0;
p = failed_pages[0];
if (!p)
return;
/* Find the failed page. */
list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
if (d_page->p != p)
continue;
/* .. and then progress over the full list. */
list_del(&d_page->page_list);
__ttm_dma_free_page(pool, d_page);
if (++i < cpages)
p = failed_pages[i];
else
break;
}
}
/*
* Allocate 'count' pages, set their caching state as required by the
* pool type, and add them to the 'd_pages' list.
* We return zero for success, and negative numbers as errors.
*/
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
struct list_head *d_pages,
unsigned count)
{
struct page **caching_array;
struct dma_page *dma_p;
struct page *p;
int r = 0;
unsigned i, cpages;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
pr_err("%s: Unable to allocate table for new pages\n",
pool->dev_name);
return -ENOMEM;
}
if (count > 1) {
pr_debug("%s: (%s:%d) Getting %d pages\n",
pool->dev_name, pool->name, current->pid, count);
}
for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool);
if (!dma_p) {
pr_err("%s: Unable to get page %u\n",
pool->dev_name, i);
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
r = ttm_set_pages_caching(pool, caching_array,
cpages);
if (r)
ttm_dma_handle_caching_state_failure(
pool, d_pages, caching_array,
cpages);
}
r = -ENOMEM;
goto out;
}
p = dma_p->p;
#ifdef CONFIG_HIGHMEM
/* gfp flags of a highmem page should never be dma32, so we
* should be fine in that case
*/
if (!PageHighMem(p))
#endif
{
caching_array[cpages++] = p;
if (cpages == max_cpages) {
/* Note: Cannot hold the spinlock */
r = ttm_set_pages_caching(pool, caching_array,
cpages);
if (r) {
ttm_dma_handle_caching_state_failure(
pool, d_pages, caching_array,
cpages);
goto out;
}
cpages = 0;
}
}
list_add(&dma_p->page_list, d_pages);
}
if (cpages) {
r = ttm_set_pages_caching(pool, caching_array, cpages);
if (r)
ttm_dma_handle_caching_state_failure(pool, d_pages,
caching_array, cpages);
}
out:
kfree(caching_array);
return r;
}
/*
* @return count of pages still required to fulfill the request.
*/
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
unsigned long *irq_flags)
{
unsigned count = _manager->options.small;
int r = pool->npages_free;
if (count > pool->npages_free) {
struct list_head d_pages;
INIT_LIST_HEAD(&d_pages);
spin_unlock_irqrestore(&pool->lock, *irq_flags);
/* Returns how many more are necessary to fulfill the
* request. */
r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
spin_lock_irqsave(&pool->lock, *irq_flags);
if (!r) {
/* Add the fresh to the end.. */
list_splice(&d_pages, &pool->free_list);
++pool->nrefills;
pool->npages_free += count;
r = count;
} else {
struct dma_page *d_page;
unsigned cpages = 0;
pr_err("%s: Failed to fill %s pool (r:%d)!\n",
pool->dev_name, pool->name, r);
list_for_each_entry(d_page, &d_pages, page_list) {
cpages++;
}
list_splice_tail(&d_pages, &pool->free_list);
pool->npages_free += cpages;
r = cpages;
}
}
return r;
}
/*
* @return 0 on success, or a negative error code.
* The populate list is actually a stack (not that it matters, as TTM
* allocates one page at a time).
*/
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
struct ttm_dma_tt *ttm_dma,
unsigned index)
{
struct dma_page *d_page;
struct ttm_tt *ttm = &ttm_dma->ttm;
unsigned long irq_flags;
int count, r = -ENOMEM;
spin_lock_irqsave(&pool->lock, irq_flags);
count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
if (count) {
d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
ttm->pages[index] = d_page->p;
ttm_dma->dma_address[index] = d_page->dma;
list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
r = 0;
pool->npages_in_use += 1;
pool->npages_free -= 1;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
return r;
}
/*
* On success the pages list will hold 'count' correctly cached pages.
* On failure a negative error code (-ENOMEM, etc.) is returned.
*/
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
struct dma_pool *pool;
enum pool_type type;
unsigned i;
gfp_t gfp_flags;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
gfp_flags = GFP_USER | GFP_DMA32;
else
gfp_flags = GFP_HIGHUSER;
if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
pool = ttm_dma_pool_init(dev, gfp_flags, type);
if (IS_ERR_OR_NULL(pool)) {
return -ENOMEM;
}
}
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
if (ret != 0) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
false, false);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
}
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return ret;
}
}
ttm->state = tt_unbound;
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Get good estimation how many pages are free in pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
struct device_pools *p;
unsigned total = 0;
mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools)
total += p->pool->npages_free;
mutex_unlock(&_manager->lock);
return total;
}
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
bool is_cached = false;
unsigned count = 0, i, npages = 0;
unsigned long irq_flags;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
pool = ttm_dma_find_pool(dev, type);
if (!pool)
return;
is_cached = (ttm_dma_find_pool(pool->dev,
ttm_to_type(ttm->page_flags, tt_cached)) == pool);
/* make sure pages array match list and count number of pages */
list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
ttm->pages[count] = d_page->p;
count++;
}
spin_lock_irqsave(&pool->lock, irq_flags);
pool->npages_in_use -= count;
if (is_cached) {
pool->nfrees += count;
} else {
pool->npages_free += count;
list_splice(&ttm_dma->pages_list, &pool->free_list);
npages = count;
if (pool->npages_free > _manager->options.max_size) {
npages = pool->npages_free - _manager->options.max_size;
/* free at least NUM_PAGES_TO_ALLOC number of pages
* to reduce calls to set_memory_wb */
if (npages < NUM_PAGES_TO_ALLOC)
npages = NUM_PAGES_TO_ALLOC;
}
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (is_cached) {
list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
d_page->p);
ttm_dma_page_put(pool, d_page);
}
} else {
for (i = 0; i < count; i++) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i]);
}
}
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
ttm_dma->dma_address[i] = 0;
}
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
ttm_dma_page_pool_free(pool, npages);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
* Callback for mm to request pool to reduce number of page held.
*/
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
if (list_empty(&_manager->pools))
return 0;
mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
unsigned nr_free;
if (!p->dev)
continue;
if (shrink_pages == 0)
break;
/* Do it in round-robin fashion. */
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
/* return estimated number of unused pages in pool */
return ttm_dma_pool_get_num_unused_pages();
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
unregister_shrinker(&manager->mm_shrink);
}
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
int ret = -ENOMEM;
WARN_ON(_manager);
pr_info("Initializing DMA pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
if (!_manager)
goto err_manager;
mutex_init(&_manager->lock);
INIT_LIST_HEAD(&_manager->pools);
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
/* This takes care of auto-freeing the _manager */
ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
&glob->kobj, "dma_pool");
if (unlikely(ret != 0)) {
kobject_put(&_manager->kobj);
goto err;
}
ttm_dma_pool_mm_shrink_init(_manager);
return 0;
err_manager:
kfree(_manager);
_manager = NULL;
err:
return ret;
}
void ttm_dma_page_alloc_fini(void)
{
struct device_pools *p, *t;
pr_info("Finalizing DMA pool allocator\n");
ttm_dma_pool_mm_shrink_fini(_manager);
list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
current->pid);
WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
ttm_dma_pool_match, p->pool));
ttm_dma_free_pool(p->dev, p->pool->type);
}
kobject_put(&_manager->kobj);
_manager = NULL;
}
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct device_pools *p;
struct dma_pool *pool = NULL;
char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
"name", "virt", "busaddr"};
if (!_manager) {
seq_printf(m, "No pool allocator running.\n");
return 0;
}
seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
h[0], h[1], h[2], h[3], h[4], h[5]);
mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools) {
struct device *dev = p->dev;
if (!dev)
continue;
pool = p->pool;
seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
pool->name, pool->nrefills,
pool->nfrees, pool->npages_in_use,
pool->npages_free,
pool->dev_name);
}
mutex_unlock(&_manager->lock);
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
| gpl-2.0 |
qubex22/AK-OnePone | sound/ppc/pmac.c | 5643 | 38204 | /*
* PMac DBDMA lowlevel functions
*
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
* code based on dmasound.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include "pmac.h"
#include <sound/pcm_params.h>
#include <asm/pmac_feature.h>
#include <asm/pci-bridge.h>
/* fixed frequency table for awacs, screamer, burgundy, DACA (44100 max) */
static int awacs_freqs[8] = {
44100, 29400, 22050, 17640, 14700, 11025, 8820, 7350
};
/* fixed frequency table for tumbler */
static int tumbler_freqs[1] = {
44100
};
/*
* we will allocate a single 'emergency' dbdma cmd block to use if the
* tx status comes up "DEAD". This happens on some PowerComputing Pmac
* clones, either owing to a bug in dbdma or some interaction between
* IDE and sound. However, this measure would deal with DEAD status if
* it appeared elsewhere.
*/
static struct pmac_dbdma emergency_dbdma;
static int emergency_in_use;
/*
* allocate DBDMA command arrays
*/
static int snd_pmac_dbdma_alloc(struct snd_pmac *chip, struct pmac_dbdma *rec, int size)
{
unsigned int rsize = sizeof(struct dbdma_cmd) * (size + 1);
rec->space = dma_alloc_coherent(&chip->pdev->dev, rsize,
&rec->dma_base, GFP_KERNEL);
if (rec->space == NULL)
return -ENOMEM;
rec->size = size;
memset(rec->space, 0, rsize);
rec->cmds = (void __iomem *)DBDMA_ALIGN(rec->space);
rec->addr = rec->dma_base + (unsigned long)((char *)rec->cmds - (char *)rec->space);
return 0;
}
static void snd_pmac_dbdma_free(struct snd_pmac *chip, struct pmac_dbdma *rec)
{
if (rec->space) {
unsigned int rsize = sizeof(struct dbdma_cmd) * (rec->size + 1);
dma_free_coherent(&chip->pdev->dev, rsize, rec->space, rec->dma_base);
}
}
/*
* pcm stuff
*/
/*
* look up frequency table
*/
unsigned int snd_pmac_rate_index(struct snd_pmac *chip, struct pmac_stream *rec, unsigned int rate)
{
int i, ok, found;
ok = rec->cur_freqs;
if (rate > chip->freq_table[0])
return 0;
found = 0;
for (i = 0; i < chip->num_freqs; i++, ok >>= 1) {
if (! (ok & 1)) continue;
found = i;
if (rate >= chip->freq_table[i])
break;
}
return found;
}
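/*
 * Example (illustrative only): with the awacs_freqs table above, a
 * requested rate of 22050 walks past 44100 and 29400 and stops at
 * index 2; any rate above 44100 returns index 0 (the highest rate).
 */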
/*
* check whether another stream is active
*/
static inline int another_stream(int stream)
{
return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
}
/*
* allocate buffers
*/
static int snd_pmac_pcm_hw_params(struct snd_pcm_substream *subs,
struct snd_pcm_hw_params *hw_params)
{
return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}
/*
* release buffers
*/
static int snd_pmac_pcm_hw_free(struct snd_pcm_substream *subs)
{
snd_pcm_lib_free_pages(subs);
return 0;
}
/*
* get a stream of the opposite direction
*/
static struct pmac_stream *snd_pmac_get_stream(struct snd_pmac *chip, int stream)
{
switch (stream) {
case SNDRV_PCM_STREAM_PLAYBACK:
return &chip->playback;
case SNDRV_PCM_STREAM_CAPTURE:
return &chip->capture;
default:
snd_BUG();
return NULL;
}
}
/*
* wait while run status is on
*/
static inline void
snd_pmac_wait_ack(struct pmac_stream *rec)
{
int timeout = 50000;
while ((in_le32(&rec->dma->status) & RUN) && timeout-- > 0)
udelay(1);
}
/*
* set the format and rate to the chip.
* call the lowlevel function if defined (e.g. for AWACS).
*/
static void snd_pmac_pcm_set_format(struct snd_pmac *chip)
{
/* set up frequency and format */
out_le32(&chip->awacs->control, chip->control_mask | (chip->rate_index << 8));
out_le32(&chip->awacs->byteswap, chip->format == SNDRV_PCM_FORMAT_S16_LE ? 1 : 0);
if (chip->set_format)
chip->set_format(chip);
}
/*
* stop the DMA transfer
*/
static inline void snd_pmac_dma_stop(struct pmac_stream *rec)
{
out_le32(&rec->dma->control, (RUN|WAKE|FLUSH|PAUSE) << 16);
snd_pmac_wait_ack(rec);
}
/*
* set the command pointer address
*/
static inline void snd_pmac_dma_set_command(struct pmac_stream *rec, struct pmac_dbdma *cmd)
{
out_le32(&rec->dma->cmdptr, cmd->addr);
}
/*
* start the DMA
*/
static inline void snd_pmac_dma_run(struct pmac_stream *rec, int status)
{
out_le32(&rec->dma->control, status | (status << 16));
}
/*
* prepare playback/capture stream
*/
static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec, struct snd_pcm_substream *subs)
{
int i;
volatile struct dbdma_cmd __iomem *cp;
struct snd_pcm_runtime *runtime = subs->runtime;
int rate_index;
long offset;
struct pmac_stream *astr;
rec->dma_size = snd_pcm_lib_buffer_bytes(subs);
rec->period_size = snd_pcm_lib_period_bytes(subs);
rec->nperiods = rec->dma_size / rec->period_size;
rec->cur_period = 0;
rate_index = snd_pmac_rate_index(chip, rec, runtime->rate);
/* set up constraints */
astr = snd_pmac_get_stream(chip, another_stream(rec->stream));
if (! astr)
return -EINVAL;
astr->cur_freqs = 1 << rate_index;
astr->cur_formats = 1 << runtime->format;
chip->rate_index = rate_index;
chip->format = runtime->format;
/* We really want to execute a DMA stop command, after the AWACS
* is initialized.
* For reasons I don't understand, it stops the hissing noise
* common to many PowerBook G3 systems and random noise otherwise
* captured on iBook2's about every third time. -ReneR
*/
spin_lock_irq(&chip->reg_lock);
snd_pmac_dma_stop(rec);
st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP);
snd_pmac_dma_set_command(rec, &chip->extra_dma);
snd_pmac_dma_run(rec, RUN);
spin_unlock_irq(&chip->reg_lock);
mdelay(5);
spin_lock_irq(&chip->reg_lock);
/* continuous DMA memory type doesn't provide the physical address,
* so we need to resolve the address here...
*/
offset = runtime->dma_addr;
for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++) {
st_le32(&cp->phy_addr, offset);
st_le16(&cp->req_count, rec->period_size);
/*st_le16(&cp->res_count, 0);*/
st_le16(&cp->xfer_status, 0);
offset += rec->period_size;
}
/* make loop */
st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
st_le32(&cp->cmd_dep, rec->cmd.addr);
snd_pmac_dma_stop(rec);
snd_pmac_dma_set_command(rec, &rec->cmd);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
/*
* PCM trigger/stop
*/
static int snd_pmac_pcm_trigger(struct snd_pmac *chip, struct pmac_stream *rec,
struct snd_pcm_substream *subs, int cmd)
{
volatile struct dbdma_cmd __iomem *cp;
int i, command;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
if (rec->running)
return -EBUSY;
command = (subs->stream == SNDRV_PCM_STREAM_PLAYBACK ?
OUTPUT_MORE : INPUT_MORE) + INTR_ALWAYS;
spin_lock(&chip->reg_lock);
snd_pmac_beep_stop(chip);
snd_pmac_pcm_set_format(chip);
for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)
out_le16(&cp->command, command);
snd_pmac_dma_set_command(rec, &rec->cmd);
(void)in_le32(&rec->dma->status);
snd_pmac_dma_run(rec, RUN|WAKE);
rec->running = 1;
spin_unlock(&chip->reg_lock);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
spin_lock(&chip->reg_lock);
rec->running = 0;
/*printk(KERN_DEBUG "stopped!!\n");*/
snd_pmac_dma_stop(rec);
for (i = 0, cp = rec->cmd.cmds; i < rec->nperiods; i++, cp++)
out_le16(&cp->command, DBDMA_STOP);
spin_unlock(&chip->reg_lock);
break;
default:
return -EINVAL;
}
return 0;
}
/*
* return the current pointer
*/
inline
static snd_pcm_uframes_t snd_pmac_pcm_pointer(struct snd_pmac *chip,
struct pmac_stream *rec,
struct snd_pcm_substream *subs)
{
int count = 0;
#if 1 /* hmm.. how can we get the current dma pointer?? */
int stat;
volatile struct dbdma_cmd __iomem *cp = &rec->cmd.cmds[rec->cur_period];
stat = ld_le16(&cp->xfer_status);
if (stat & (ACTIVE|DEAD)) {
count = in_le16(&cp->res_count);
if (count)
count = rec->period_size - count;
}
#endif
count += rec->cur_period * rec->period_size;
/*printk(KERN_DEBUG "pointer=%d\n", count);*/
return bytes_to_frames(subs->runtime, count);
}
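/*
 * Example (illustrative only): if the active dbdma block has a
 * period_size of 4096 and res_count reads back 1024, the in-flight
 * block contributes 4096 - 1024 = 3072 bytes on top of the bytes of
 * all periods already completed.
 */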
/*
* playback
*/
static int snd_pmac_playback_prepare(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_prepare(chip, &chip->playback, subs);
}
static int snd_pmac_playback_trigger(struct snd_pcm_substream *subs,
int cmd)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_trigger(chip, &chip->playback, subs, cmd);
}
static snd_pcm_uframes_t snd_pmac_playback_pointer(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_pointer(chip, &chip->playback, subs);
}
/*
* capture
*/
static int snd_pmac_capture_prepare(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_prepare(chip, &chip->capture, subs);
}
static int snd_pmac_capture_trigger(struct snd_pcm_substream *subs,
int cmd)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_trigger(chip, &chip->capture, subs, cmd);
}
static snd_pcm_uframes_t snd_pmac_capture_pointer(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_pointer(chip, &chip->capture, subs);
}
/*
* Handle DEAD DMA transfers:
* if the TX status comes up "DEAD" - reported on some Power Computing machines
* we need to re-start the dbdma - but from a different physical start address
* and with a different transfer length. It would get very messy to do this
* with the normal dbdma_cmd blocks - we would have to re-write the buffer start
* addresses each time. So, we will keep a single dbdma_cmd block which can be
* fiddled with.
* When DEAD status is first reported the content of the faulted dbdma block is
* copied into the emergency buffer and we note that the buffer is in use.
* we then bump the start physical address by the amount that was successfully
* output before it died.
* On any subsequent DEAD result we just do the bump-ups (we know that we are
* already using the emergency dbdma_cmd).
* CHECK: this just tries to "do it". It is possible that we should abandon
* xfers when the number of residual bytes gets below a certain value - I can
* see that this might cause a loop-forever if a too small transfer causes
* DEAD status. However this is a TODO for now - we'll see what gets reported.
* When we get a successful transfer result with the emergency buffer we just
* pretend that it completed using the original dbdma_cmd and carry on. The
* 'next_cmd' field will already point back to the original loop of blocks.
*/
static inline void snd_pmac_pcm_dead_xfer(struct pmac_stream *rec,
volatile struct dbdma_cmd __iomem *cp)
{
unsigned short req, res ;
unsigned int phy ;
/* printk(KERN_WARNING "snd-powermac: DMA died - patching it up!\n"); */
/* to clear DEAD status we must first clear RUN
set it to quiescent to be on the safe side */
(void)in_le32(&rec->dma->status);
out_le32(&rec->dma->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
if (!emergency_in_use) { /* new problem */
memcpy((void *)emergency_dbdma.cmds, (void *)cp,
sizeof(struct dbdma_cmd));
emergency_in_use = 1;
st_le16(&cp->xfer_status, 0);
st_le16(&cp->req_count, rec->period_size);
cp = emergency_dbdma.cmds;
}
/* now bump the values to reflect the amount
we haven't yet shifted */
req = ld_le16(&cp->req_count);
res = ld_le16(&cp->res_count);
phy = ld_le32(&cp->phy_addr);
phy += (req - res);
st_le16(&cp->req_count, res);
st_le16(&cp->res_count, 0);
st_le16(&cp->xfer_status, 0);
st_le32(&cp->phy_addr, phy);
st_le32(&cp->cmd_dep, rec->cmd.addr
+ sizeof(struct dbdma_cmd)*((rec->cur_period+1)%rec->nperiods));
st_le16(&cp->command, OUTPUT_MORE | BR_ALWAYS | INTR_ALWAYS);
/* point at our patched up command block */
out_le32(&rec->dma->cmdptr, emergency_dbdma.addr);
/* we must re-start the controller */
(void)in_le32(&rec->dma->status);
/* should complete clearing the DEAD status */
out_le32(&rec->dma->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
}
/*
* update playback/capture pointer from interrupts
*/
static void snd_pmac_pcm_update(struct snd_pmac *chip, struct pmac_stream *rec)
{
volatile struct dbdma_cmd __iomem *cp;
int c;
int stat;
spin_lock(&chip->reg_lock);
if (rec->running) {
for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */
if (emergency_in_use) /* already using DEAD xfer? */
cp = emergency_dbdma.cmds;
else
cp = &rec->cmd.cmds[rec->cur_period];
stat = ld_le16(&cp->xfer_status);
if (stat & DEAD) {
snd_pmac_pcm_dead_xfer(rec, cp);
break; /* this block is still going */
}
if (emergency_in_use)
emergency_in_use = 0 ; /* done that */
if (! (stat & ACTIVE))
break;
/*printk(KERN_DEBUG "update frag %d\n", rec->cur_period);*/
st_le16(&cp->xfer_status, 0);
st_le16(&cp->req_count, rec->period_size);
/*st_le16(&cp->res_count, 0);*/
rec->cur_period++;
if (rec->cur_period >= rec->nperiods) {
rec->cur_period = 0;
}
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(rec->substream);
spin_lock(&chip->reg_lock);
}
}
spin_unlock(&chip->reg_lock);
}
/*
* hw info
*/
static struct snd_pcm_hardware snd_pmac_playback =
{
.info = (SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_8000_44100,
.rate_min = 7350,
.rate_max = 44100,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 131072,
.period_bytes_min = 256,
.period_bytes_max = 16384,
.periods_min = 3,
.periods_max = PMAC_MAX_FRAGS,
};
static struct snd_pcm_hardware snd_pmac_capture =
{
.info = (SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_RESUME),
.formats = SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_8000_44100,
.rate_min = 7350,
.rate_max = 44100,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 131072,
.period_bytes_min = 256,
.period_bytes_max = 16384,
.periods_min = 3,
.periods_max = PMAC_MAX_FRAGS,
};
#if 0 // NYI
static int snd_pmac_hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_pmac *chip = rule->private;
struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]);
int i, freq_table[8], num_freqs;
if (! rec)
return -EINVAL;
num_freqs = 0;
for (i = chip->num_freqs - 1; i >= 0; i--) {
if (rec->cur_freqs & (1 << i))
freq_table[num_freqs++] = chip->freq_table[i];
}
return snd_interval_list(hw_param_interval(params, rule->var),
num_freqs, freq_table, 0);
}
static int snd_pmac_hw_rule_format(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_pmac *chip = rule->private;
struct pmac_stream *rec = snd_pmac_get_stream(chip, rule->deps[0]);
if (! rec)
return -EINVAL;
return snd_mask_refine_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT),
rec->cur_formats);
}
#endif // NYI
static int snd_pmac_pcm_open(struct snd_pmac *chip, struct pmac_stream *rec,
struct snd_pcm_substream *subs)
{
struct snd_pcm_runtime *runtime = subs->runtime;
int i;
/* look up frequency table and fill bit mask */
runtime->hw.rates = 0;
for (i = 0; i < chip->num_freqs; i++)
if (chip->freqs_ok & (1 << i))
runtime->hw.rates |=
snd_pcm_rate_to_rate_bit(chip->freq_table[i]);
/* check for minimum and maximum rates */
for (i = 0; i < chip->num_freqs; i++) {
if (chip->freqs_ok & (1 << i)) {
runtime->hw.rate_max = chip->freq_table[i];
break;
}
}
for (i = chip->num_freqs - 1; i >= 0; i--) {
if (chip->freqs_ok & (1 << i)) {
runtime->hw.rate_min = chip->freq_table[i];
break;
}
}
runtime->hw.formats = chip->formats_ok;
if (chip->can_capture) {
if (! chip->can_duplex)
runtime->hw.info |= SNDRV_PCM_INFO_HALF_DUPLEX;
runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
}
runtime->private_data = rec;
rec->substream = subs;
#if 0 /* FIXME: still under development.. */
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
snd_pmac_hw_rule_rate, chip, rec->stream, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
snd_pmac_hw_rule_format, chip, rec->stream, -1);
#endif
runtime->hw.periods_max = rec->cmd.size - 1;
/* constraints to fix choppy sound */
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
return 0;
}
static int snd_pmac_pcm_close(struct snd_pmac *chip, struct pmac_stream *rec,
struct snd_pcm_substream *subs)
{
struct pmac_stream *astr;
snd_pmac_dma_stop(rec);
astr = snd_pmac_get_stream(chip, another_stream(rec->stream));
if (! astr)
return -EINVAL;
/* reset constraints */
astr->cur_freqs = chip->freqs_ok;
astr->cur_formats = chip->formats_ok;
return 0;
}
static int snd_pmac_playback_open(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
subs->runtime->hw = snd_pmac_playback;
return snd_pmac_pcm_open(chip, &chip->playback, subs);
}
static int snd_pmac_capture_open(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
subs->runtime->hw = snd_pmac_capture;
return snd_pmac_pcm_open(chip, &chip->capture, subs);
}
static int snd_pmac_playback_close(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_close(chip, &chip->playback, subs);
}
static int snd_pmac_capture_close(struct snd_pcm_substream *subs)
{
struct snd_pmac *chip = snd_pcm_substream_chip(subs);
return snd_pmac_pcm_close(chip, &chip->capture, subs);
}
/*
*/
static struct snd_pcm_ops snd_pmac_playback_ops = {
.open = snd_pmac_playback_open,
.close = snd_pmac_playback_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_pmac_pcm_hw_params,
.hw_free = snd_pmac_pcm_hw_free,
.prepare = snd_pmac_playback_prepare,
.trigger = snd_pmac_playback_trigger,
.pointer = snd_pmac_playback_pointer,
};
static struct snd_pcm_ops snd_pmac_capture_ops = {
.open = snd_pmac_capture_open,
.close = snd_pmac_capture_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_pmac_pcm_hw_params,
.hw_free = snd_pmac_pcm_hw_free,
.prepare = snd_pmac_capture_prepare,
.trigger = snd_pmac_capture_trigger,
.pointer = snd_pmac_capture_pointer,
};
int __devinit snd_pmac_pcm_new(struct snd_pmac *chip)
{
struct snd_pcm *pcm;
int err;
int num_captures = 1;
if (! chip->can_capture)
num_captures = 0;
err = snd_pcm_new(chip->card, chip->card->driver, 0, 1, num_captures, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_pmac_playback_ops);
if (chip->can_capture)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_pmac_capture_ops);
pcm->private_data = chip;
pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
strcpy(pcm->name, chip->card->shortname);
chip->pcm = pcm;
chip->formats_ok = SNDRV_PCM_FMTBIT_S16_BE;
if (chip->can_byte_swap)
chip->formats_ok |= SNDRV_PCM_FMTBIT_S16_LE;
chip->playback.cur_formats = chip->formats_ok;
chip->capture.cur_formats = chip->formats_ok;
chip->playback.cur_freqs = chip->freqs_ok;
chip->capture.cur_freqs = chip->freqs_ok;
/* preallocate 64k buffer */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pdev->dev,
64 * 1024, 64 * 1024);
return 0;
}
static void snd_pmac_dbdma_reset(struct snd_pmac *chip)
{
out_le32(&chip->playback.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
snd_pmac_wait_ack(&chip->playback);
out_le32(&chip->capture.dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
snd_pmac_wait_ack(&chip->capture);
}
/*
* handling beep
*/
void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long addr, int speed)
{
struct pmac_stream *rec = &chip->playback;
snd_pmac_dma_stop(rec);
st_le16(&chip->extra_dma.cmds->req_count, bytes);
st_le16(&chip->extra_dma.cmds->xfer_status, 0);
st_le32(&chip->extra_dma.cmds->cmd_dep, chip->extra_dma.addr);
st_le32(&chip->extra_dma.cmds->phy_addr, addr);
st_le16(&chip->extra_dma.cmds->command, OUTPUT_MORE + BR_ALWAYS);
out_le32(&chip->awacs->control,
(in_le32(&chip->awacs->control) & ~0x1f00)
| (speed << 8));
out_le32(&chip->awacs->byteswap, 0);
snd_pmac_dma_set_command(rec, &chip->extra_dma);
snd_pmac_dma_run(rec, RUN);
}
void snd_pmac_beep_dma_stop(struct snd_pmac *chip)
{
snd_pmac_dma_stop(&chip->playback);
st_le16(&chip->extra_dma.cmds->command, DBDMA_STOP);
snd_pmac_pcm_set_format(chip); /* reset format */
}
/*
* interrupt handlers
*/
static irqreturn_t
snd_pmac_tx_intr(int irq, void *devid)
{
struct snd_pmac *chip = devid;
snd_pmac_pcm_update(chip, &chip->playback);
return IRQ_HANDLED;
}
static irqreturn_t
snd_pmac_rx_intr(int irq, void *devid)
{
struct snd_pmac *chip = devid;
snd_pmac_pcm_update(chip, &chip->capture);
return IRQ_HANDLED;
}
static irqreturn_t
snd_pmac_ctrl_intr(int irq, void *devid)
{
struct snd_pmac *chip = devid;
int ctrl = in_le32(&chip->awacs->control);
/*printk(KERN_DEBUG "pmac: control interrupt.. 0x%x\n", ctrl);*/
if (ctrl & MASK_PORTCHG) {
/* do something when headphone is plugged/unplugged? */
if (chip->update_automute)
chip->update_automute(chip, 1);
}
if (ctrl & MASK_CNTLERR) {
int err = (in_le32(&chip->awacs->codec_stat) & MASK_ERRCODE) >> 16;
if (err && chip->model <= PMAC_SCREAMER)
snd_printk(KERN_DEBUG "error %x\n", err);
}
/* Writing 1s to the CNTLERR and PORTCHG bits clears them... */
out_le32(&chip->awacs->control, ctrl);
return IRQ_HANDLED;
}
/*
* a wrapper to feature call for compatibility
*/
static void snd_pmac_sound_feature(struct snd_pmac *chip, int enable)
{
if (ppc_md.feature_call)
ppc_md.feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, chip->node, 0, enable);
}
/*
* release resources
*/
static int snd_pmac_free(struct snd_pmac *chip)
{
/* stop sounds */
if (chip->initialized) {
snd_pmac_dbdma_reset(chip);
/* disable interrupts from awacs interface */
out_le32(&chip->awacs->control, in_le32(&chip->awacs->control) & 0xfff);
}
if (chip->node)
snd_pmac_sound_feature(chip, 0);
/* clean up mixer if any */
if (chip->mixer_free)
chip->mixer_free(chip);
snd_pmac_detach_beep(chip);
/* release resources */
if (chip->irq >= 0)
free_irq(chip->irq, (void*)chip);
if (chip->tx_irq >= 0)
free_irq(chip->tx_irq, (void*)chip);
if (chip->rx_irq >= 0)
free_irq(chip->rx_irq, (void*)chip);
snd_pmac_dbdma_free(chip, &chip->playback.cmd);
snd_pmac_dbdma_free(chip, &chip->capture.cmd);
snd_pmac_dbdma_free(chip, &chip->extra_dma);
snd_pmac_dbdma_free(chip, &emergency_dbdma);
if (chip->macio_base)
iounmap(chip->macio_base);
if (chip->latch_base)
iounmap(chip->latch_base);
if (chip->awacs)
iounmap(chip->awacs);
if (chip->playback.dma)
iounmap(chip->playback.dma);
if (chip->capture.dma)
iounmap(chip->capture.dma);
if (chip->node) {
int i;
for (i = 0; i < 3; i++) {
if (chip->requested & (1 << i))
release_mem_region(chip->rsrc[i].start,
resource_size(&chip->rsrc[i]));
}
}
if (chip->pdev)
pci_dev_put(chip->pdev);
of_node_put(chip->node);
kfree(chip);
return 0;
}
/*
* free the device
*/
static int snd_pmac_dev_free(struct snd_device *device)
{
struct snd_pmac *chip = device->device_data;
return snd_pmac_free(chip);
}
/*
* check the machine support byteswap (little-endian)
*/
static void __devinit detect_byte_swap(struct snd_pmac *chip)
{
struct device_node *mio;
/* it seems that Keylargo can't byte-swap */
for (mio = chip->node->parent; mio; mio = mio->parent) {
if (strcmp(mio->name, "mac-io") == 0) {
if (of_device_is_compatible(mio, "Keylargo"))
chip->can_byte_swap = 0;
break;
}
}
/* it seems the Pismo & iBook can't byte-swap in hardware. */
if (of_machine_is_compatible("PowerBook3,1") ||
of_machine_is_compatible("PowerBook2,1"))
chip->can_byte_swap = 0;
if (of_machine_is_compatible("PowerBook2,1"))
chip->can_duplex = 0;
}
/*
* detect a sound chip
*/
static int __devinit snd_pmac_detect(struct snd_pmac *chip)
{
struct device_node *sound;
struct device_node *dn;
const unsigned int *prop;
unsigned int l;
struct macio_chip* macio;
if (!machine_is(powermac))
return -ENODEV;
chip->subframe = 0;
chip->revision = 0;
chip->freqs_ok = 0xff; /* all ok */
chip->model = PMAC_AWACS;
chip->can_byte_swap = 1;
chip->can_duplex = 1;
chip->can_capture = 1;
chip->num_freqs = ARRAY_SIZE(awacs_freqs);
chip->freq_table = awacs_freqs;
chip->pdev = NULL;
chip->control_mask = MASK_IEPC | MASK_IEE | 0x11; /* default */
/* check machine type */
if (of_machine_is_compatible("AAPL,3400/2400")
|| of_machine_is_compatible("AAPL,3500"))
chip->is_pbook_3400 = 1;
else if (of_machine_is_compatible("PowerBook1,1")
|| of_machine_is_compatible("AAPL,PowerBook1998"))
chip->is_pbook_G3 = 1;
chip->node = of_find_node_by_name(NULL, "awacs");
sound = of_node_get(chip->node);
/*
* powermac G3 models have a node called "davbus"
* with a child called "sound".
*/
if (!chip->node)
chip->node = of_find_node_by_name(NULL, "davbus");
/*
* if we didn't find a davbus device, try 'i2s-a' since
* this seems to be what iBooks have
*/
if (! chip->node) {
chip->node = of_find_node_by_name(NULL, "i2s-a");
if (chip->node && chip->node->parent &&
chip->node->parent->parent) {
if (of_device_is_compatible(chip->node->parent->parent,
"K2-Keylargo"))
chip->is_k2 = 1;
}
}
if (! chip->node)
return -ENODEV;
if (!sound) {
sound = of_find_node_by_name(NULL, "sound");
while (sound && sound->parent != chip->node)
sound = of_find_node_by_name(sound, "sound");
}
if (! sound) {
of_node_put(chip->node);
chip->node = NULL;
return -ENODEV;
}
prop = of_get_property(sound, "sub-frame", NULL);
if (prop && *prop < 16)
chip->subframe = *prop;
prop = of_get_property(sound, "layout-id", NULL);
if (prop) {
/* partly deprecate snd-powermac, for those machines
* that have a layout-id property for now */
printk(KERN_INFO "snd-powermac no longer handles any "
"machines with a layout-id property "
"in the device-tree, use snd-aoa.\n");
of_node_put(sound);
of_node_put(chip->node);
chip->node = NULL;
return -ENODEV;
}
/* This should be verified on older screamers */
if (of_device_is_compatible(sound, "screamer")) {
chip->model = PMAC_SCREAMER;
// chip->can_byte_swap = 0; /* FIXME: check this */
}
if (of_device_is_compatible(sound, "burgundy")) {
chip->model = PMAC_BURGUNDY;
chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
}
if (of_device_is_compatible(sound, "daca")) {
chip->model = PMAC_DACA;
chip->can_capture = 0; /* no capture */
chip->can_duplex = 0;
// chip->can_byte_swap = 0; /* FIXME: check this */
chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
}
if (of_device_is_compatible(sound, "tumbler")) {
chip->model = PMAC_TUMBLER;
chip->can_capture = of_machine_is_compatible("PowerMac4,2")
|| of_machine_is_compatible("PowerBook3,2")
|| of_machine_is_compatible("PowerBook3,3")
|| of_machine_is_compatible("PowerBook4,1")
|| of_machine_is_compatible("PowerBook4,2")
|| of_machine_is_compatible("PowerBook4,3");
chip->can_duplex = 0;
// chip->can_byte_swap = 0; /* FIXME: check this */
chip->num_freqs = ARRAY_SIZE(tumbler_freqs);
chip->freq_table = tumbler_freqs;
chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
}
if (of_device_is_compatible(sound, "snapper")) {
chip->model = PMAC_SNAPPER;
// chip->can_byte_swap = 0; /* FIXME: check this */
chip->num_freqs = ARRAY_SIZE(tumbler_freqs);
chip->freq_table = tumbler_freqs;
chip->control_mask = MASK_IEPC | 0x11; /* disable IEE */
}
prop = of_get_property(sound, "device-id", NULL);
if (prop)
chip->device_id = *prop;
dn = of_find_node_by_name(NULL, "perch");
chip->has_iic = (dn != NULL);
of_node_put(dn);
/* We need the PCI device for DMA allocations, let's use a crude method
* for now ...
*/
macio = macio_find(chip->node, macio_unknown);
if (macio == NULL)
printk(KERN_WARNING "snd-powermac: can't locate macio !\n");
else {
struct pci_dev *pdev = NULL;
for_each_pci_dev(pdev) {
struct device_node *np = pci_device_to_OF_node(pdev);
if (np && np == macio->of_node) {
chip->pdev = pdev;
break;
}
}
}
if (chip->pdev == NULL)
printk(KERN_WARNING "snd-powermac: can't locate macio PCI"
" device !\n");
detect_byte_swap(chip);
/* look for a property saying what sample rates
are available */
prop = of_get_property(sound, "sample-rates", &l);
if (! prop)
prop = of_get_property(sound, "output-frame-rates", &l);
if (prop) {
int i;
chip->freqs_ok = 0;
for (l /= sizeof(int); l > 0; --l) {
unsigned int r = *prop++;
/* Apple 'Fixed' format */
if (r >= 0x10000)
r >>= 16;
for (i = 0; i < chip->num_freqs; ++i) {
if (r == chip->freq_table[i]) {
chip->freqs_ok |= (1 << i);
break;
}
}
}
} else {
/* assume only 44.1khz */
chip->freqs_ok = 1;
}
of_node_put(sound);
return 0;
}
#ifdef PMAC_SUPPORT_AUTOMUTE
/*
* auto-mute
*/
static int pmac_auto_mute_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = chip->auto_mute;
return 0;
}
static int pmac_auto_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
if (ucontrol->value.integer.value[0] != chip->auto_mute) {
chip->auto_mute = !!ucontrol->value.integer.value[0];
if (chip->update_automute)
chip->update_automute(chip, 1);
return 1;
}
return 0;
}
static int pmac_hp_detect_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
if (chip->detect_headphone)
ucontrol->value.integer.value[0] = chip->detect_headphone(chip);
else
ucontrol->value.integer.value[0] = 0;
return 0;
}
static struct snd_kcontrol_new auto_mute_controls[] __devinitdata = {
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Auto Mute Switch",
.info = snd_pmac_boolean_mono_info,
.get = pmac_auto_mute_get,
.put = pmac_auto_mute_put,
},
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Detection",
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.info = snd_pmac_boolean_mono_info,
.get = pmac_hp_detect_get,
},
};
int __devinit snd_pmac_add_automute(struct snd_pmac *chip)
{
int err;
chip->auto_mute = 1;
err = snd_ctl_add(chip->card, snd_ctl_new1(&auto_mute_controls[0], chip));
if (err < 0) {
printk(KERN_ERR "snd-powermac: Failed to add automute control\n");
return err;
}
chip->hp_detect_ctl = snd_ctl_new1(&auto_mute_controls[1], chip);
return snd_ctl_add(chip->card, chip->hp_detect_ctl);
}
#endif /* PMAC_SUPPORT_AUTOMUTE */
/*
* create and detect a pmac chip record
*/
int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
{
struct snd_pmac *chip;
struct device_node *np;
int i, err;
unsigned int irq;
unsigned long ctrl_addr, txdma_addr, rxdma_addr;
static struct snd_device_ops ops = {
.dev_free = snd_pmac_dev_free,
};
*chip_return = NULL;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
chip->card = card;
spin_lock_init(&chip->reg_lock);
chip->irq = chip->tx_irq = chip->rx_irq = -1;
chip->playback.stream = SNDRV_PCM_STREAM_PLAYBACK;
chip->capture.stream = SNDRV_PCM_STREAM_CAPTURE;
if ((err = snd_pmac_detect(chip)) < 0)
goto __error;
if (snd_pmac_dbdma_alloc(chip, &chip->playback.cmd, PMAC_MAX_FRAGS + 1) < 0 ||
snd_pmac_dbdma_alloc(chip, &chip->capture.cmd, PMAC_MAX_FRAGS + 1) < 0 ||
snd_pmac_dbdma_alloc(chip, &chip->extra_dma, 2) < 0 ||
snd_pmac_dbdma_alloc(chip, &emergency_dbdma, 2) < 0) {
err = -ENOMEM;
goto __error;
}
np = chip->node;
chip->requested = 0;
if (chip->is_k2) {
static char *rnames[] = {
"Sound Control", "Sound DMA" };
for (i = 0; i < 2; i ++) {
if (of_address_to_resource(np->parent, i,
&chip->rsrc[i])) {
printk(KERN_ERR "snd: can't translate rsrc "
" %d (%s)\n", i, rnames[i]);
err = -ENODEV;
goto __error;
}
if (request_mem_region(chip->rsrc[i].start,
resource_size(&chip->rsrc[i]),
rnames[i]) == NULL) {
printk(KERN_ERR "snd: can't request rsrc "
" %d (%s: %pR)\n",
i, rnames[i], &chip->rsrc[i]);
err = -ENODEV;
goto __error;
}
chip->requested |= (1 << i);
}
ctrl_addr = chip->rsrc[0].start;
txdma_addr = chip->rsrc[1].start;
rxdma_addr = txdma_addr + 0x100;
} else {
static char *rnames[] = {
"Sound Control", "Sound Tx DMA", "Sound Rx DMA" };
for (i = 0; i < 3; i ++) {
if (of_address_to_resource(np, i,
&chip->rsrc[i])) {
printk(KERN_ERR "snd: can't translate rsrc "
" %d (%s)\n", i, rnames[i]);
err = -ENODEV;
goto __error;
}
if (request_mem_region(chip->rsrc[i].start,
resource_size(&chip->rsrc[i]),
rnames[i]) == NULL) {
printk(KERN_ERR "snd: can't request rsrc "
" %d (%s: %pR)\n",
i, rnames[i], &chip->rsrc[i]);
err = -ENODEV;
goto __error;
}
chip->requested |= (1 << i);
}
ctrl_addr = chip->rsrc[0].start;
txdma_addr = chip->rsrc[1].start;
rxdma_addr = chip->rsrc[2].start;
}
chip->awacs = ioremap(ctrl_addr, 0x1000);
chip->playback.dma = ioremap(txdma_addr, 0x100);
chip->capture.dma = ioremap(rxdma_addr, 0x100);
if (chip->model <= PMAC_BURGUNDY) {
irq = irq_of_parse_and_map(np, 0);
if (request_irq(irq, snd_pmac_ctrl_intr, 0,
"PMac", (void*)chip)) {
snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n",
irq);
err = -EBUSY;
goto __error;
}
chip->irq = irq;
}
irq = irq_of_parse_and_map(np, 1);
if (request_irq(irq, snd_pmac_tx_intr, 0, "PMac Output", (void*)chip)){
snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
err = -EBUSY;
goto __error;
}
chip->tx_irq = irq;
irq = irq_of_parse_and_map(np, 2);
if (request_irq(irq, snd_pmac_rx_intr, 0, "PMac Input", (void*)chip)) {
snd_printk(KERN_ERR "pmac: unable to grab IRQ %d\n", irq);
err = -EBUSY;
goto __error;
}
chip->rx_irq = irq;
snd_pmac_sound_feature(chip, 1);
/* reset & enable interrupts */
if (chip->model <= PMAC_BURGUNDY)
out_le32(&chip->awacs->control, chip->control_mask);
/* Powerbooks have odd ways of enabling inputs such as
an expansion-bay CD or sound from an internal modem
or a PC-card modem. */
if (chip->is_pbook_3400) {
/* Enable CD and PC-card sound inputs. */
/* This is done by reading from address
* f301a000, + 0x10 to enable the expansion-bay
* CD sound input, + 0x80 to enable the PC-card
* sound input. The 0x100 enables the SCSI bus
* terminator power.
*/
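/* The single read below uses offset 0x190 = 0x100 | 0x80 | 0x10, so
* terminator power, PC-card sound and expansion-bay CD sound are
* all enabled in one access. */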
chip->latch_base = ioremap (0xf301a000, 0x1000);
in_8(chip->latch_base + 0x190);
} else if (chip->is_pbook_G3) {
struct device_node* mio;
for (mio = chip->node->parent; mio; mio = mio->parent) {
if (strcmp(mio->name, "mac-io") == 0) {
struct resource r;
if (of_address_to_resource(mio, 0, &r) == 0)
chip->macio_base =
ioremap(r.start, 0x40);
break;
}
}
/* Enable CD sound input. */
/* The relevant bits for writing to this byte are 0x8f.
* I haven't found out what the 0x80 bit does.
* For the 0xf bits, writing 3 or 7 enables the CD
* input, any other value disables it. Values
* 1, 3, 5, 7 enable the microphone. Values 0, 2,
* 4, 6, 8 - f enable the input from the modem.
*/
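/* Per the table above, the value 3 written below enables the CD
* input (and, incidentally, the microphone). */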
if (chip->macio_base)
out_8(chip->macio_base + 0x37, 3);
}
/* Reset dbdma channels */
snd_pmac_dbdma_reset(chip);
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
goto __error;
*chip_return = chip;
return 0;
__error:
snd_pmac_free(chip);
return err;
}
/*
* sleep notify for powerbook
*/
#ifdef CONFIG_PM
/*
* Save state when going to sleep, restore it afterwards.
*/
void snd_pmac_suspend(struct snd_pmac *chip)
{
unsigned long flags;
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
if (chip->suspend)
chip->suspend(chip);
snd_pcm_suspend_all(chip->pcm);
spin_lock_irqsave(&chip->reg_lock, flags);
snd_pmac_beep_stop(chip);
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (chip->irq >= 0)
disable_irq(chip->irq);
if (chip->tx_irq >= 0)
disable_irq(chip->tx_irq);
if (chip->rx_irq >= 0)
disable_irq(chip->rx_irq);
snd_pmac_sound_feature(chip, 0);
}
void snd_pmac_resume(struct snd_pmac *chip)
{
snd_pmac_sound_feature(chip, 1);
if (chip->resume)
chip->resume(chip);
/* enable CD sound input */
if (chip->macio_base && chip->is_pbook_G3)
out_8(chip->macio_base + 0x37, 3);
else if (chip->is_pbook_3400)
in_8(chip->latch_base + 0x190);
snd_pmac_pcm_set_format(chip);
if (chip->irq >= 0)
enable_irq(chip->irq);
if (chip->tx_irq >= 0)
enable_irq(chip->tx_irq);
if (chip->rx_irq >= 0)
enable_irq(chip->rx_irq);
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
}
#endif /* CONFIG_PM */
| gpl-2.0 |
voodik/android_kernel_hardkernel_odroidu | drivers/ata/pata_cmd640.c | 8203 | 6854 | /*
* pata_cmd640.c - CMD640 PCI PATA for new ATA layer
* (C) 2007 Red Hat Inc
*
* Based upon
* linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996
*
* Copyright (C) 1995-1996 Linus Torvalds & authors (see driver)
*
* This drives only the PCI version of the controller. If you have a
* VLB one then we have enough docs to support it but you can write
* your own code.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cmd640"
#define DRV_VERSION "0.0.5"
struct cmd640_reg {
int last;
u8 reg58[ATA_MAX_DEVICES];
};
enum {
CFR = 0x50,
CNTRL = 0x51,
CMDTIM = 0x52,
ARTIM0 = 0x53,
DRWTIM0 = 0x54,
ARTIM23 = 0x57,
DRWTIM23 = 0x58,
BRST = 0x59
};
/**
* cmd640_set_piomode - set initial PIO mode data
* @ap: ATA port
* @adev: ATA device
*
* Called to do the PIO mode setup.
*/
static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct cmd640_reg *timing = ap->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_timing t;
const unsigned long T = 1000000 / 33;
const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
u8 reg;
int arttim = ARTIM0 + 2 * adev->devno;
struct ata_device *pair = ata_dev_pair(adev);
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
return;
}
/* The second channel has shared timings and the setup timing is
messy to switch per drive, so we merge it with the pair's and use the worst case */
if (ap->port_no && pair) {
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP);
}
/* Make the timings fit */
if (t.recover > 16) {
t.active += t.recover - 16;
t.recover = 16;
}
if (t.active > 16)
t.active = 16;
/* Now convert the clocks into values we can actually stuff into
the chip */
if (t.recover > 1)
t.recover--; /* 640B only */
else
t.recover = 15;
if (t.setup > 4)
t.setup = 0xC0;
else
t.setup = setup_data[t.setup];
if (ap->port_no == 0) {
t.active &= 0x0F; /* 0 = 16 */
/* Load setup timing */
pci_read_config_byte(pdev, arttim, &reg);
reg &= 0x3F;
reg |= t.setup;
pci_write_config_byte(pdev, arttim, reg);
/* Load active/recovery */
pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover);
} else {
/* Save the shared timings for the channel; they will be loaded
by qc_issue. Reloading the setup time is expensive so we
keep a merged one loaded */
pci_read_config_byte(pdev, ARTIM23, &reg);
reg &= 0x3F;
reg |= t.setup;
pci_write_config_byte(pdev, ARTIM23, reg);
timing->reg58[adev->devno] = (t.active << 4) | t.recover;
}
}
/**
* cmd640_qc_issue - command preparation hook
* @qc: Command to be issued
*
* Channel 1 has shared timings. We must reprogram the
* clock each time we switch between drive 2 and drive 3.
*/
static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing = ap->private_data;
if (ap->port_no != 0 && adev->devno != timing->last) {
pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]);
timing->last = adev->devno;
}
return ata_sff_qc_issue(qc);
}
/**
* cmd640_port_start - port setup
* @ap: ATA port being set up
*
* The CMD640 needs to maintain private data structures so we
* allocate space here.
*/
static int cmd640_port_start(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing;
timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
if (timing == NULL)
return -ENOMEM;
timing->last = -1; /* Force a load */
ap->private_data = timing;
return 0;
}
static bool cmd640_sff_irq_check(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int irq_reg = ap->port_no ? ARTIM23 : CFR;
u8 irq_stat, irq_mask = ap->port_no ? 0x10 : 0x04;
pci_read_config_byte(pdev, irq_reg, &irq_stat);
return irq_stat & irq_mask;
}
static struct scsi_host_template cmd640_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations cmd640_port_ops = {
.inherits = &ata_sff_port_ops,
/* In theory xfer_noirq is not needed once we kill the prefetcher */
.sff_data_xfer = ata_sff_data_xfer_noirq,
.sff_irq_check = cmd640_sff_irq_check,
.qc_issue = cmd640_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = cmd640_set_piomode,
.port_start = cmd640_port_start,
};
static void cmd640_hardware_init(struct pci_dev *pdev)
{
u8 ctrl;
/* CMD640 detected, commiserations */
pci_write_config_byte(pdev, 0x5B, 0x00);
/* PIO0 command cycles */
pci_write_config_byte(pdev, CMDTIM, 0);
/* 512 byte bursts (sector) */
pci_write_config_byte(pdev, BRST, 0x40);
/*
* A reporter a long time ago
* Had problems with the data fifo
* So don't run the risk
* Of putting crap on the disk
* For its better just to go slow
*/
/* Do channel 0 */
pci_read_config_byte(pdev, CNTRL, &ctrl);
pci_write_config_byte(pdev, CNTRL, ctrl | 0xC0);
/* Ditto for channel 1 */
pci_read_config_byte(pdev, ARTIM23, &ctrl);
ctrl |= 0x0C;
pci_write_config_byte(pdev, ARTIM23, ctrl);
}
static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.port_ops = &cmd640_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
cmd640_hardware_init(pdev);
return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL, 0);
}
#ifdef CONFIG_PM
static int cmd640_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
cmd640_hardware_init(pdev);
ata_host_resume(host);
return 0;
}
#endif
static const struct pci_device_id cmd640[] = {
{ PCI_VDEVICE(CMD, 0x640), 0 },
{ },
};
static struct pci_driver cmd640_pci_driver = {
.name = DRV_NAME,
.id_table = cmd640,
.probe = cmd640_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = cmd640_reinit_one,
#endif
};
static int __init cmd640_init(void)
{
return pci_register_driver(&cmd640_pci_driver);
}
static void __exit cmd640_exit(void)
{
pci_unregister_driver(&cmd640_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cmd640);
MODULE_VERSION(DRV_VERSION);
module_init(cmd640_init);
module_exit(cmd640_exit);
| gpl-2.0 |
steppnasty/platform_kernel_msm7x30 | drivers/gpu/drm/ttm/ttm_module.c | 8459 | 2974 | /**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
* Jerome Glisse
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
#include "ttm/ttm_module.h"
#include "drm_sysfs.h"
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
atomic_t device_released;
static struct device_type ttm_drm_class_type = {
.name = "ttm",
/**
* Add pm ops here.
*/
};
static void ttm_drm_class_device_release(struct device *dev)
{
atomic_set(&device_released, 1);
wake_up_all(&exit_q);
}
static struct device ttm_drm_class_device = {
.type = &ttm_drm_class_type,
.release = &ttm_drm_class_device_release
};
struct kobject *ttm_get_kobj(void)
{
struct kobject *kobj = &ttm_drm_class_device.kobj;
BUG_ON(kobj == NULL);
return kobj;
}
static int __init ttm_init(void)
{
int ret;
ret = dev_set_name(&ttm_drm_class_device, "ttm");
if (unlikely(ret != 0))
return ret;
atomic_set(&device_released, 0);
ret = drm_class_device_register(&ttm_drm_class_device);
if (unlikely(ret != 0))
goto out_no_dev_reg;
return 0;
out_no_dev_reg:
atomic_set(&device_released, 1);
wake_up_all(&exit_q);
return ret;
}
static void __exit ttm_exit(void)
{
drm_class_device_unregister(&ttm_drm_class_device);
/**
* Refuse to unload until the TTM device is released.
* Not sure this is 100% needed.
*/
wait_event(exit_q, atomic_read(&device_released) == 1);
}
module_init(ttm_init);
module_exit(ttm_exit);
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
| gpl-2.0 |
armStrapTools/linux-sunxi-ap6210 | drivers/net/ppp/bsd_comp.c | 9995 | 29584 | /*
* Update: The Berkeley copyright was changed, and the change
* is retroactive to all "true" BSD software (ie everything
* from UCB as opposed to other peoples code that just carried
* the same license). The new copyright doesn't clash with the
* GPL, so the module-only restriction has been removed..
*/
/* Because this code is derived from the 4.3BSD compress source:
*
* Copyright (c) 1985, 1986 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* James A. Woods, derived from original work by Spencer Thomas
* and Joseph Orost.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This version is for use with contiguous buffers on Linux-derived systems.
*
* ==FILEVERSION 20000226==
*
* NOTE TO MAINTAINERS:
* If you modify this file at all, please set the number above to the
* date of the modification as YYYYMMDD (year month day).
* bsd_comp.c is shipped with a PPP distribution as well as with
* the kernel; if everyone increases the FILEVERSION number above,
* then scripts can do the right thing when deciding whether to
* install a new bsd_comp.c file. Don't change the format of that
* line otherwise, so the installation script can recognize it.
*
* From: bsd_comp.c,v 1.3 1994/12/08 01:59:58 paulus Exp
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/ppp_defs.h>
#undef PACKETPTR
#define PACKETPTR 1
#include <linux/ppp-comp.h>
#undef PACKETPTR
#include <asm/byteorder.h>
/*
* PPP "BSD compress" compression
* The differences between this compression and the classic BSD LZW
* source are obvious from the requirement that the classic code worked
* with files while this handles arbitrarily long streams that
* are broken into packets. They are:
*
* When the code size expands, a block of junk is not emitted by
* the compressor and not expected by the decompressor.
*
* New codes are not necessarily assigned every time an old
* code is output by the compressor. This is because a packet
* end forces a code to be emitted, but does not imply that a
* new sequence has been seen.
*
* The compression ratio is checked at the first end of a packet
* after the appropriate gap. Besides simplifying and speeding
* things up, this makes it more likely that the transmitter
* and receiver will agree when the dictionary is cleared when
* compression is not going well.
*/
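/*
* On the wire each compressed packet carries a two byte sequence
* number (the BSD_OVHD bytes) right after the PPP header; bsd_compress
* emits it and bsd_decompress checks it, so a lost packet is detected
* before the dictionaries can silently diverge.
*/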
/*
* Macros to extract protocol version and number of bits
* from the third byte of the BSD Compress CCP configuration option.
*/
#define BSD_VERSION(x) ((x) >> 5)
#define BSD_NBITS(x) ((x) & 0x1F)
#define BSD_CURRENT_VERSION 1
/*
* A dictionary for doing BSD compress.
*/
struct bsd_dict {
union { /* hash value */
unsigned long fcode;
struct {
#if defined(__LITTLE_ENDIAN) /* Little endian order */
unsigned short prefix; /* preceding code */
unsigned char suffix; /* last character of new code */
unsigned char pad;
#elif defined(__BIG_ENDIAN) /* Big endian order */
unsigned char pad;
unsigned char suffix; /* last character of new code */
unsigned short prefix; /* preceding code */
#else
#error Endianness not defined...
#endif
} hs;
} f;
unsigned short codem1; /* output of hash table -1 */
unsigned short cptr; /* map code to hash table entry */
};
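/*
* The dictionary is indexed two ways: a (prefix,suffix) pair hashes to
* a slot whose codem1 field holds the assigned code minus one, while
* dict[code].cptr records which hash slot that code occupies, so the
* compressor can invalidate the previous owner of a recycled code and
* the decompressor can walk prefix chains back to the first character.
*/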
struct bsd_db {
int totlen; /* length of this structure */
unsigned int hsize; /* size of the hash table */
unsigned char hshift; /* used in hash function */
unsigned char n_bits; /* current bits/code */
unsigned char maxbits; /* maximum bits/code */
unsigned char debug; /* non-zero if debug desired */
unsigned char unit; /* ppp unit number */
unsigned short seqno; /* sequence # of next packet */
unsigned int mru; /* size of receive (decompress) bufr */
unsigned int maxmaxcode; /* largest valid code */
unsigned int max_ent; /* largest code in use */
unsigned int in_count; /* uncompressed bytes, aged */
unsigned int bytes_out; /* compressed bytes, aged */
unsigned int ratio; /* recent compression ratio */
unsigned int checkpoint; /* when to next check the ratio */
unsigned int clear_count; /* times dictionary cleared */
unsigned int incomp_count; /* incompressible packets */
unsigned int incomp_bytes; /* incompressible bytes */
unsigned int uncomp_count; /* uncompressed packets */
unsigned int uncomp_bytes; /* uncompressed bytes */
unsigned int comp_count; /* compressed packets */
unsigned int comp_bytes; /* compressed bytes */
unsigned short *lens; /* array of lengths of codes */
struct bsd_dict *dict; /* dictionary */
};
#define BSD_OVHD 2 /* BSD compress overhead/packet */
#define MIN_BSD_BITS 9
#define BSD_INIT_BITS MIN_BSD_BITS
#define MAX_BSD_BITS 15
static void bsd_free (void *state);
static void *bsd_alloc(unsigned char *options, int opt_len, int decomp);
static void *bsd_comp_alloc (unsigned char *options, int opt_len);
static void *bsd_decomp_alloc (unsigned char *options, int opt_len);
static int bsd_init (void *db, unsigned char *options,
int opt_len, int unit, int debug, int decomp);
static int bsd_comp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int debug);
static int bsd_decomp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int mru,
int debug);
static void bsd_reset (void *state);
static void bsd_comp_stats (void *state, struct compstat *stats);
static int bsd_compress (void *state, unsigned char *rptr,
unsigned char *obuf, int isize, int osize);
static void bsd_incomp (void *state, unsigned char *ibuf, int icnt);
static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
unsigned char *obuf, int osize);
/* These are in ppp_generic.c */
extern int ppp_register_compressor (struct compressor *cp);
extern void ppp_unregister_compressor (struct compressor *cp);
/*
* the next two codes should not be changed lightly, as they must not
* lie within the contiguous general code space.
*/
#define CLEAR 256 /* table clear output code */
#define FIRST 257 /* first free entry */
#define LAST 255
#define MAXCODE(b) ((1 << (b)) - 1)
#define BADCODEM1 MAXCODE(MAX_BSD_BITS)
#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
^ (unsigned long)(prefix))
#define BSD_KEY(prefix,suffix) ((((unsigned long)(suffix)) << 16) \
+ (unsigned long)(prefix))
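/*
* For example, with hshift = 4, prefix = 0x41 and suffix = 0x42,
* BSD_KEY gives 0x420041 (the unique value stored for exact matching)
* while BSD_HASH gives (0x42 << 4) ^ 0x41 = 0x461, the initial probe
* index into the hash table.
*/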
#define CHECK_GAP 10000 /* Ratio check interval */
#define RATIO_SCALE_LOG 8
#define RATIO_SCALE (1<<RATIO_SCALE_LOG)
#define RATIO_MAX (0x7fffffff>>RATIO_SCALE_LOG)
/*
* clear the dictionary
*/
static void
bsd_clear(struct bsd_db *db)
{
db->clear_count++;
db->max_ent = FIRST-1;
db->n_bits = BSD_INIT_BITS;
db->bytes_out = 0;
db->in_count = 0;
db->ratio = 0;
db->checkpoint = CHECK_GAP;
}
/*
* If the dictionary is full, then see if it is time to reset it.
*
* Compute the compression ratio using fixed-point arithmetic
* with 8 fractional bits.
*
* Since we have an infinite stream instead of a single file,
* watch only the local compression ratio.
*
* Since both peers must reset the dictionary at the same time even in
* the absence of CLEAR codes (while packets are incompressible), they
* must compute the same ratio.
*/
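/*
* For example, with RATIO_SCALE_LOG = 8, 10000 input bytes compressed
* to 4000 output bytes give a ratio of (10000 << 8) / 4000 = 640,
* i.e. 2.5:1 in fixed point with 8 fractional bits.
*/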
static int bsd_check (struct bsd_db *db) /* 1=output CLEAR */
{
unsigned int new_ratio;
if (db->in_count >= db->checkpoint)
{
/* age the ratio by limiting the size of the counts */
if (db->in_count >= RATIO_MAX || db->bytes_out >= RATIO_MAX)
{
db->in_count -= (db->in_count >> 2);
db->bytes_out -= (db->bytes_out >> 2);
}
db->checkpoint = db->in_count + CHECK_GAP;
if (db->max_ent >= db->maxmaxcode)
{
/* Reset the dictionary only if the ratio is worse,
* or if it looks as if it has been poisoned
* by incompressible data.
*
* This does not overflow, because
* db->in_count <= RATIO_MAX.
*/
new_ratio = db->in_count << RATIO_SCALE_LOG;
if (db->bytes_out != 0)
{
new_ratio /= db->bytes_out;
}
if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE)
{
bsd_clear (db);
return 1;
}
db->ratio = new_ratio;
}
}
return 0;
}
/*
* Return statistics.
*/
static void bsd_comp_stats (void *state, struct compstat *stats)
{
struct bsd_db *db = (struct bsd_db *) state;
stats->unc_bytes = db->uncomp_bytes;
stats->unc_packets = db->uncomp_count;
stats->comp_bytes = db->comp_bytes;
stats->comp_packets = db->comp_count;
stats->inc_bytes = db->incomp_bytes;
stats->inc_packets = db->incomp_count;
stats->in_count = db->in_count;
stats->bytes_out = db->bytes_out;
}
/*
* Reset state, as on a CCP ResetReq.
*/
static void bsd_reset (void *state)
{
struct bsd_db *db = (struct bsd_db *) state;
bsd_clear(db);
db->seqno = 0;
db->clear_count = 0;
}
/*
* Release the compression structure
*/
static void bsd_free (void *state)
{
struct bsd_db *db = state;
if (!db)
return;
/*
* Release the dictionary
*/
vfree(db->dict);
db->dict = NULL;
/*
* Release the string buffer
*/
vfree(db->lens);
db->lens = NULL;
/*
* Finally release the structure itself.
*/
kfree(db);
}
/*
* Allocate space for a (de) compressor.
*/
static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
{
int bits;
unsigned int hsize, hshift, maxmaxcode;
struct bsd_db *db;
if (opt_len != 3 || options[0] != CI_BSD_COMPRESS || options[1] != 3
|| BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
{
return NULL;
}
bits = BSD_NBITS(options[2]);
switch (bits)
{
case 9: /* needs 82152 for both directions */
case 10: /* needs 84144 */
case 11: /* needs 88240 */
case 12: /* needs 96432 */
hsize = 5003;
hshift = 4;
break;
case 13: /* needs 176784 */
hsize = 9001;
hshift = 5;
break;
case 14: /* needs 353744 */
hsize = 18013;
hshift = 6;
break;
case 15: /* needs 691440 */
hsize = 35023;
hshift = 7;
break;
case 16: /* needs 1366160--far too much, */
/* hsize = 69001; */ /* and 69001 is too big for cptr */
/* hshift = 8; */ /* in struct bsd_db */
/* break; */
default:
return NULL;
}
/*
* Allocate the main control structure for this instance.
*/
maxmaxcode = MAXCODE(bits);
db = kzalloc(sizeof (struct bsd_db),
GFP_KERNEL);
if (!db)
{
return NULL;
}
/*
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
if (!db->dict)
{
bsd_free (db);
return NULL;
}
/*
* If this is the compression buffer then there is no length data.
*/
if (!decomp)
{
db->lens = NULL;
}
/*
* For decompression, the length information is needed as well.
*/
else
{
db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
if (!db->lens)
{
bsd_free (db);
return NULL;
}
}
/*
* Initialize the data information for the compression code
*/
db->totlen = sizeof (struct bsd_db) +
(sizeof (struct bsd_dict) * hsize);
db->hsize = hsize;
db->hshift = hshift;
db->maxmaxcode = maxmaxcode;
db->maxbits = bits;
return (void *) db;
}
static void *bsd_comp_alloc (unsigned char *options, int opt_len)
{
return bsd_alloc (options, opt_len, 0);
}
static void *bsd_decomp_alloc (unsigned char *options, int opt_len)
{
return bsd_alloc (options, opt_len, 1);
}
/*
* Initialize the database.
*/
static int bsd_init (void *state, unsigned char *options,
int opt_len, int unit, int debug, int decomp)
{
struct bsd_db *db = state;
int indx;
if ((opt_len != 3) || (options[0] != CI_BSD_COMPRESS) || (options[1] != 3)
|| (BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
|| (BSD_NBITS(options[2]) != db->maxbits)
|| (decomp && db->lens == NULL))
{
return 0;
}
if (decomp)
{
indx = LAST;
do
{
db->lens[indx] = 1;
}
while (indx-- > 0);
}
indx = db->hsize;
while (indx-- != 0)
{
db->dict[indx].codem1 = BADCODEM1;
db->dict[indx].cptr = 0;
}
db->unit = unit;
db->mru = 0;
#ifndef DEBUG
if (debug)
#endif
db->debug = 1;
bsd_reset(db);
return 1;
}
static int bsd_comp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int debug)
{
return bsd_init (state, options, opt_len, unit, debug, 0);
}
static int bsd_decomp_init (void *state, unsigned char *options,
int opt_len, int unit, int opthdr, int mru,
int debug)
{
return bsd_init (state, options, opt_len, unit, debug, 1);
}
/*
* Obtain pointers to the various structures in the compression tables
*/
#define dict_ptrx(p,idx) &(p->dict[idx])
#define lens_ptrx(p,idx) &(p->lens[idx])
#ifdef DEBUG
static unsigned short *lens_ptr(struct bsd_db *db, int idx)
{
if ((unsigned int) idx > (unsigned int) db->maxmaxcode)
{
printk ("<9>ppp: lens_ptr(%d) > max\n", idx);
idx = 0;
}
return lens_ptrx (db, idx);
}
static struct bsd_dict *dict_ptr(struct bsd_db *db, int idx)
{
if ((unsigned int) idx >= (unsigned int) db->hsize)
{
printk ("<9>ppp: dict_ptr(%d) > max\n", idx);
idx = 0;
}
return dict_ptrx (db, idx);
}
#else
#define lens_ptr(db,idx) lens_ptrx(db,idx)
#define dict_ptr(db,idx) dict_ptrx(db,idx)
#endif
/*
* compress a packet
*
* The result of this function is the size of the compressed
* packet. A zero is returned if the packet was not compressed
* for some reason, such as the size being larger than uncompressed.
*
* One change from the BSD compress command is that when the
* code size expands, we do not output a bunch of padding.
*/
static int bsd_compress (void *state, unsigned char *rptr, unsigned char *obuf,
int isize, int osize)
{
struct bsd_db *db;
int hshift;
unsigned int max_ent;
unsigned int n_bits;
unsigned int bitno;
unsigned long accm;
int ent;
unsigned long fcode;
struct bsd_dict *dictp;
unsigned char c;
int hval;
int disp;
int ilen;
int mxcode;
unsigned char *wptr;
int olen;
#define PUTBYTE(v) \
{ \
++olen; \
if (wptr) \
{ \
*wptr++ = (unsigned char) (v); \
if (olen >= osize) \
{ \
wptr = NULL; \
} \
} \
}
#define OUTPUT(ent) \
{ \
bitno -= n_bits; \
accm |= ((ent) << bitno); \
do \
{ \
PUTBYTE(accm >> 24); \
accm <<= 8; \
bitno += 8; \
} \
while (bitno <= 24); \
}
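/*
* Codes are packed MSB-first into the 32-bit accumulator: OUTPUT()
* lowers bitno by n_bits, ORs the code in at that position and then
* flushes whole bytes while bitno <= 24. For example, the first 9-bit
* code of a packet lands in bits 31..23, one byte is emitted and the
* remaining bit stays buffered for the next code.
*/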
/*
* If the protocol is not in the range we're interested in,
* just return without compressing the packet. If it is,
* the protocol becomes the first byte to compress.
*/
ent = PPP_PROTOCOL(rptr);
if (ent < 0x21 || ent > 0xf9)
{
return 0;
}
db = (struct bsd_db *) state;
hshift = db->hshift;
max_ent = db->max_ent;
n_bits = db->n_bits;
bitno = 32;
accm = 0;
mxcode = MAXCODE (n_bits);
/* Initialize the output pointers */
wptr = obuf;
olen = PPP_HDRLEN + BSD_OVHD;
if (osize > isize)
{
osize = isize;
}
/* This is the PPP header information */
if (wptr)
{
*wptr++ = PPP_ADDRESS(rptr);
*wptr++ = PPP_CONTROL(rptr);
*wptr++ = 0;
*wptr++ = PPP_COMP;
*wptr++ = db->seqno >> 8;
*wptr++ = db->seqno;
}
/* Skip the input header */
rptr += PPP_HDRLEN;
isize -= PPP_HDRLEN;
ilen = ++isize; /* Low byte of protocol is counted as input */
while (--ilen > 0)
{
c = *rptr++;
fcode = BSD_KEY (ent, c);
hval = BSD_HASH (ent, c, hshift);
dictp = dict_ptr (db, hval);
/* Validate and then check the entry. */
if (dictp->codem1 >= max_ent)
{
goto nomatch;
}
if (dictp->f.fcode == fcode)
{
ent = dictp->codem1 + 1;
continue; /* found (prefix,suffix) */
}
/* continue probing until a match or invalid entry */
disp = (hval == 0) ? 1 : hval;
do
{
hval += disp;
if (hval >= db->hsize)
{
hval -= db->hsize;
}
dictp = dict_ptr (db, hval);
if (dictp->codem1 >= max_ent)
{
goto nomatch;
}
}
while (dictp->f.fcode != fcode);
ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */
continue;
nomatch:
OUTPUT(ent); /* output the prefix */
/* code -> hashtable */
if (max_ent < db->maxmaxcode)
{
struct bsd_dict *dictp2;
struct bsd_dict *dictp3;
int indx;
/* expand code size if needed */
if (max_ent >= mxcode)
{
db->n_bits = ++n_bits;
mxcode = MAXCODE (n_bits);
}
/* Invalidate old hash table entry using
* this code, and then take it over.
*/
dictp2 = dict_ptr (db, max_ent + 1);
indx = dictp2->cptr;
dictp3 = dict_ptr (db, indx);
if (dictp3->codem1 == max_ent)
{
dictp3->codem1 = BADCODEM1;
}
dictp2->cptr = hval;
dictp->codem1 = max_ent;
dictp->f.fcode = fcode;
db->max_ent = ++max_ent;
if (db->lens)
{
unsigned short *len1 = lens_ptr (db, max_ent);
unsigned short *len2 = lens_ptr (db, ent);
*len1 = *len2 + 1;
}
}
ent = c;
}
OUTPUT(ent); /* output the last code */
db->bytes_out += olen - PPP_HDRLEN - BSD_OVHD;
db->uncomp_bytes += isize;
db->in_count += isize;
++db->uncomp_count;
++db->seqno;
if (bitno < 32)
{
++db->bytes_out; /* must be set before calling bsd_check */
}
/*
* Generate the clear command if needed
*/
if (bsd_check(db))
{
OUTPUT (CLEAR);
}
/*
* Pad dribble bits of last code with ones.
* Do not emit a completely useless byte of ones.
*/
if (bitno != 32)
{
PUTBYTE((accm | (0xff << (bitno-8))) >> 24);
}
/*
* Increase code size if we would have without the packet
* boundary because the decompressor will do so.
*/
if (max_ent >= mxcode && max_ent < db->maxmaxcode)
{
db->n_bits++;
}
/* If output length is too large then this is an incomplete frame. */
if (wptr == NULL)
{
++db->incomp_count;
db->incomp_bytes += isize;
olen = 0;
}
else /* Count the number of compressed frames */
{
++db->comp_count;
db->comp_bytes += olen;
}
/* Return the resulting output length */
return olen;
#undef OUTPUT
#undef PUTBYTE
}
/*
* Update the "BSD Compress" dictionary on the receiver for
* incompressible data by pretending to compress the incoming data.
*/
static void bsd_incomp (void *state, unsigned char *ibuf, int icnt)
{
(void) bsd_compress (state, ibuf, (char *) 0, icnt, 0);
}
/*
* Decompress "BSD Compress".
*
* Because of patent problems, we return DECOMP_ERROR for errors
* found by inspecting the input data and for system problems, but
* DECOMP_FATALERROR for any errors which could possibly be said to
* be being detected "after" decompression. For DECOMP_ERROR,
* we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
* infringing a patent of Motorola's if we do, so we take CCP down
* instead.
*
* Given that the frame has the correct sequence number and a good FCS,
* errors such as invalid codes in the input most likely indicate a
* bug, so we return DECOMP_FATALERROR for them in order to turn off
* compression, even though they are detected by inspecting the input.
*/
static int bsd_decompress (void *state, unsigned char *ibuf, int isize,
unsigned char *obuf, int osize)
{
struct bsd_db *db;
unsigned int max_ent;
unsigned long accm;
unsigned int bitno; /* 1st valid bit in accm */
unsigned int n_bits;
unsigned int tgtbitno; /* bitno when we have a code */
struct bsd_dict *dictp;
int explen;
int seq;
unsigned int incode;
unsigned int oldcode;
unsigned int finchar;
unsigned char *p;
unsigned char *wptr;
int adrs;
int ctrl;
int ilen;
int codelen;
int extra;
db = (struct bsd_db *) state;
max_ent = db->max_ent;
accm = 0;
bitno = 32; /* 1st valid bit in accm */
n_bits = db->n_bits;
tgtbitno = 32 - n_bits; /* bitno when we have a code */
/*
* Save the address/control from the PPP header
* and then get the sequence number.
*/
adrs = PPP_ADDRESS (ibuf);
ctrl = PPP_CONTROL (ibuf);
seq = (ibuf[4] << 8) + ibuf[5];
ibuf += (PPP_HDRLEN + 2);
ilen = isize - (PPP_HDRLEN + 2);
/*
* Check the sequence number and give up if it differs from
* the value we're expecting.
*/
if (seq != db->seqno)
{
if (db->debug)
{
printk("bsd_decomp%d: bad sequence # %d, expected %d\n",
db->unit, seq, db->seqno - 1);
}
return DECOMP_ERROR;
}
++db->seqno;
db->bytes_out += ilen;
/*
* Fill in the ppp header, but not the last byte of the protocol
* (that comes from the decompressed data).
*/
wptr = obuf;
*wptr++ = adrs;
*wptr++ = ctrl;
*wptr++ = 0;
oldcode = CLEAR;
explen = 3;
/*
* Keep the checkpoint correctly so that incompressible packets
* clear the dictionary at the proper times.
*/
for (;;)
{
if (ilen-- <= 0)
{
db->in_count += (explen - 3); /* don't count the header */
break;
}
/*
* Accumulate bytes until we have a complete code.
* Then get the next code, relying on the 32-bit,
* unsigned accm to mask the result.
*/
bitno -= 8;
accm |= *ibuf++ << bitno;
if (tgtbitno < bitno)
{
continue;
}
incode = accm >> tgtbitno;
accm <<= n_bits;
bitno += n_bits;
/*
* The dictionary must only be cleared at the end of a packet.
*/
if (incode == CLEAR)
{
if (ilen > 0)
{
if (db->debug)
{
printk("bsd_decomp%d: bad CLEAR\n", db->unit);
}
return DECOMP_FATALERROR; /* probably a bug */
}
bsd_clear(db);
break;
}
if ((incode > max_ent + 2) || (incode > db->maxmaxcode)
|| (incode > max_ent && oldcode == CLEAR))
{
if (db->debug)
{
printk("bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
db->unit, incode, oldcode);
printk("max_ent=0x%x explen=%d seqno=%d\n",
max_ent, explen, db->seqno);
}
return DECOMP_FATALERROR; /* probably a bug */
}
/* Special case for KwKwK string. */
if (incode > max_ent)
{
finchar = oldcode;
extra = 1;
}
else
{
finchar = incode;
extra = 0;
}
codelen = *(lens_ptr (db, finchar));
explen += codelen + extra;
if (explen > osize)
{
if (db->debug)
{
printk("bsd_decomp%d: ran out of mru\n", db->unit);
#ifdef DEBUG
printk(" len=%d, finchar=0x%x, codelen=%d, explen=%d\n",
ilen, finchar, codelen, explen);
#endif
}
return DECOMP_FATALERROR;
}
/*
* Decode this code and install it in the decompressed buffer.
*/
wptr += codelen;
p = wptr;
while (finchar > LAST)
{
struct bsd_dict *dictp2 = dict_ptr (db, finchar);
dictp = dict_ptr (db, dictp2->cptr);
#ifdef DEBUG
if (--codelen <= 0 || dictp->codem1 != finchar-1)
{
if (codelen <= 0)
{
printk("bsd_decomp%d: fell off end of chain ", db->unit);
printk("0x%x at 0x%x by 0x%x, max_ent=0x%x\n",
incode, finchar, dictp2->cptr, max_ent);
}
else
{
if (dictp->codem1 != finchar-1)
{
printk("bsd_decomp%d: bad code chain 0x%x "
"finchar=0x%x ",
db->unit, incode, finchar);
printk("oldcode=0x%x cptr=0x%x codem1=0x%x\n",
oldcode, dictp2->cptr, dictp->codem1);
}
}
return DECOMP_FATALERROR;
}
#endif
*--p = dictp->f.hs.suffix;
finchar = dictp->f.hs.prefix;
}
*--p = finchar;
#ifdef DEBUG
if (--codelen != 0)
{
printk("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n",
db->unit, codelen, incode, max_ent);
}
#endif
if (extra) /* the KwKwK case again */
{
*wptr++ = finchar;
}
/*
* If not first code in a packet, and
* if not out of code space, then allocate a new code.
*
* Keep the hash table correct so it can be used
* with uncompressed packets.
*/
if (oldcode != CLEAR && max_ent < db->maxmaxcode)
{
struct bsd_dict *dictp2, *dictp3;
unsigned short *lens1, *lens2;
unsigned long fcode;
int hval, disp, indx;
fcode = BSD_KEY(oldcode,finchar);
hval = BSD_HASH(oldcode,finchar,db->hshift);
dictp = dict_ptr (db, hval);
/* look for a free hash table entry */
if (dictp->codem1 < max_ent)
{
disp = (hval == 0) ? 1 : hval;
do
{
hval += disp;
if (hval >= db->hsize)
{
hval -= db->hsize;
}
dictp = dict_ptr (db, hval);
}
while (dictp->codem1 < max_ent);
}
/*
* Invalidate previous hash table entry
* assigned this code, and then take it over
*/
dictp2 = dict_ptr (db, max_ent + 1);
indx = dictp2->cptr;
dictp3 = dict_ptr (db, indx);
if (dictp3->codem1 == max_ent)
{
dictp3->codem1 = BADCODEM1;
}
dictp2->cptr = hval;
dictp->codem1 = max_ent;
dictp->f.fcode = fcode;
db->max_ent = ++max_ent;
/* Update the length of this string. */
lens1 = lens_ptr (db, max_ent);
lens2 = lens_ptr (db, oldcode);
*lens1 = *lens2 + 1;
/* Expand code size if needed. */
if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
{
db->n_bits = ++n_bits;
tgtbitno = 32-n_bits;
}
}
oldcode = incode;
}
++db->comp_count;
++db->uncomp_count;
db->comp_bytes += isize - BSD_OVHD - PPP_HDRLEN;
db->uncomp_bytes += explen;
if (bsd_check(db))
{
if (db->debug)
{
printk("bsd_decomp%d: peer should have cleared dictionary on %d\n",
db->unit, db->seqno - 1);
}
}
return explen;
}
/*************************************************************
* Table of addresses for the BSD compression module
*************************************************************/
static struct compressor ppp_bsd_compress = {
.compress_proto = CI_BSD_COMPRESS,
.comp_alloc = bsd_comp_alloc,
.comp_free = bsd_free,
.comp_init = bsd_comp_init,
.comp_reset = bsd_reset,
.compress = bsd_compress,
.comp_stat = bsd_comp_stats,
.decomp_alloc = bsd_decomp_alloc,
.decomp_free = bsd_free,
.decomp_init = bsd_decomp_init,
.decomp_reset = bsd_reset,
.decompress = bsd_decompress,
.incomp = bsd_incomp,
.decomp_stat = bsd_comp_stats,
.owner = THIS_MODULE
};
/*************************************************************
* Module support routines
*************************************************************/
static int __init bsdcomp_init(void)
{
int answer = ppp_register_compressor(&ppp_bsd_compress);
if (answer == 0)
printk(KERN_INFO "PPP BSD Compression module registered\n");
return answer;
}
static void __exit bsdcomp_cleanup(void)
{
ppp_unregister_compressor(&ppp_bsd_compress);
}
module_init(bsdcomp_init);
module_exit(bsdcomp_cleanup);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
| gpl-2.0 |
nimengyu2/dm3730-android-gingerbread-2.3-dk1.0-kernel | drivers/media/video/cx231xx/cx231xx-video.c | 12 | 58823 | /*
cx231xx-video.c - driver for Conexant Cx23100/101/102
USB video capture devices
Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
Based on em28xx driver
Based on cx23885 driver
Based on cx88 driver
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
#include <media/msp3400.h>
#include <media/tuner.h>
#include "dvb_frontend.h"
#include "cx231xx.h"
#include "cx231xx-vbi.h"
#define CX231XX_VERSION_CODE KERNEL_VERSION(0, 0, 1)
#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>"
#define DRIVER_DESC "Conexant cx231xx based USB video device driver"
#define cx231xx_videodbg(fmt, arg...) do {\
if (video_debug) \
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __func__ , ##arg); } while (0)
static unsigned int isoc_debug;
module_param(isoc_debug, int, 0644);
MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
#define cx231xx_isocdbg(fmt, arg...) \
do {\
if (isoc_debug) { \
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __func__ , ##arg); \
} \
} while (0)
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
MODULE_PARM_DESC(video_nr, "video device numbers");
MODULE_PARM_DESC(vbi_nr, "vbi device numbers");
MODULE_PARM_DESC(radio_nr, "radio device numbers");
static unsigned int video_debug;
module_param(video_debug, int, 0644);
MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
/* supported video standards */
static struct cx231xx_fmt format[] = {
{
.name = "16bpp YUY2, 4:2:2, packed",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.reg = 0,
},
};
/* supported controls */
/* Common to all boards */
/* ------------------------------------------------------------------- */
static const struct v4l2_queryctrl no_ctl = {
.name = "42",
.flags = V4L2_CTRL_FLAG_DISABLED,
};
static struct cx231xx_ctrl cx231xx_ctls[] = {
/* --- video --- */
{
.v = {
.id = V4L2_CID_BRIGHTNESS,
.name = "Brightness",
.minimum = 0x00,
.maximum = 0xff,
.step = 1,
.default_value = 0x7f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 128,
.reg = LUMA_CTRL,
.mask = 0x00ff,
.shift = 0,
}, {
.v = {
.id = V4L2_CID_CONTRAST,
.name = "Contrast",
.minimum = 0,
.maximum = 0xff,
.step = 1,
.default_value = 0x3f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 0,
.reg = LUMA_CTRL,
.mask = 0xff00,
.shift = 8,
}, {
.v = {
.id = V4L2_CID_HUE,
.name = "Hue",
.minimum = 0,
.maximum = 0xff,
.step = 1,
.default_value = 0x7f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 128,
.reg = CHROMA_CTRL,
.mask = 0xff0000,
.shift = 16,
}, {
/* strictly, this describes only U saturation.
* V saturation is handled specially through code.
*/
.v = {
.id = V4L2_CID_SATURATION,
.name = "Saturation",
.minimum = 0,
.maximum = 0xff,
.step = 1,
.default_value = 0x7f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 0,
.reg = CHROMA_CTRL,
.mask = 0x00ff,
.shift = 0,
}, {
/* --- audio --- */
.v = {
.id = V4L2_CID_AUDIO_MUTE,
.name = "Mute",
.minimum = 0,
.maximum = 1,
.default_value = 1,
.type = V4L2_CTRL_TYPE_BOOLEAN,
},
.reg = PATH1_CTL1,
.mask = (0x1f << 24),
.shift = 24,
}, {
.v = {
.id = V4L2_CID_AUDIO_VOLUME,
.name = "Volume",
.minimum = 0,
.maximum = 0x3f,
.step = 1,
.default_value = 0x3f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.reg = PATH1_VOL_CTL,
.mask = 0xff,
.shift = 0,
}
};
static const int CX231XX_CTLS = ARRAY_SIZE(cx231xx_ctls);
static const u32 cx231xx_user_ctrls[] = {
V4L2_CID_USER_CLASS,
V4L2_CID_BRIGHTNESS,
V4L2_CID_CONTRAST,
V4L2_CID_SATURATION,
V4L2_CID_HUE,
V4L2_CID_AUDIO_VOLUME,
#if 0
V4L2_CID_AUDIO_BALANCE,
#endif
V4L2_CID_AUDIO_MUTE,
0
};
static const u32 *ctrl_classes[] = {
cx231xx_user_ctrls,
NULL
};
/* ------------------------------------------------------------------
Video buffer and parser functions
------------------------------------------------------------------*/
/*
* Announces that a buffer was filled and requests the next
*/
static inline void buffer_filled(struct cx231xx *dev,
struct cx231xx_dmaqueue *dma_q,
struct cx231xx_buffer *buf)
{
/* Advise that the buffer was filled */
cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
buf->vb.state = VIDEOBUF_DONE;
buf->vb.field_count++;
do_gettimeofday(&buf->vb.ts);
dev->video_mode.isoc_ctl.buf = NULL;
list_del(&buf->vb.queue);
wake_up(&buf->vb.done);
}
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
char *errmsg = "Unknown";
switch (status) {
case -ENOENT:
errmsg = "unlinked synchronuously";
break;
case -ECONNRESET:
errmsg = "unlinked asynchronuously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
break;
case -EPIPE:
errmsg = "Stalled (device not responding)";
break;
case -EOVERFLOW:
errmsg = "Babble (bad cable?)";
break;
case -EPROTO:
errmsg = "Bit-stuff error (bad cable?)";
break;
case -EILSEQ:
errmsg = "CRC/Timeout (could be anything)";
break;
case -ETIME:
errmsg = "Device does not respond";
break;
}
if (packet < 0) {
cx231xx_isocdbg("URB status %d [%s].\n", status, errmsg);
} else {
cx231xx_isocdbg("URB packet %d, status %d [%s].\n",
packet, status, errmsg);
}
}
/*
* video-buf generic routine to get the next available buffer
*/
static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
struct cx231xx_buffer **buf)
{
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
char *outp;
if (list_empty(&dma_q->active)) {
cx231xx_isocdbg("No active queue to serve\n");
dev->video_mode.isoc_ctl.buf = NULL;
*buf = NULL;
return;
}
/* Get the next buffer */
*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
/* Clean up the buffer - useful for testing for frame/URB loss */
outp = videobuf_to_vmalloc(&(*buf)->vb);
memset(outp, 0, (*buf)->vb.size);
dev->video_mode.isoc_ctl.buf = *buf;
return;
}
/*
* Controls the isoc copy of each urb packet
*/
static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
{
struct cx231xx_buffer *buf;
struct cx231xx_dmaqueue *dma_q = urb->context;
unsigned char *outp = NULL;
int i, rc = 1;
unsigned char *p_buffer;
u32 bytes_parsed = 0, buffer_size = 0;
u8 sav_eav = 0;
if (!dev)
return 0;
if ((dev->state & DEV_DISCONNECTED) || (dev->state & DEV_MISCONFIGURED))
return 0;
if (urb->status < 0) {
print_err_status(dev, -1, urb->status);
if (urb->status == -ENOENT)
return 0;
}
buf = dev->video_mode.isoc_ctl.buf;
if (buf != NULL)
outp = videobuf_to_vmalloc(&buf->vb);
for (i = 0; i < urb->number_of_packets; i++) {
int status = urb->iso_frame_desc[i].status;
if (status < 0) {
print_err_status(dev, i, status);
if (urb->iso_frame_desc[i].status != -EPROTO)
continue;
}
if (urb->iso_frame_desc[i].actual_length <= 0) {
/* cx231xx_isocdbg("packet %d is empty",i); - spammy */
continue;
}
if (urb->iso_frame_desc[i].actual_length >
dev->video_mode.max_pkt_size) {
cx231xx_isocdbg("packet bigger than packet size");
continue;
}
/* get buffer pointer and length */
p_buffer = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
buffer_size = urb->iso_frame_desc[i].actual_length;
bytes_parsed = 0;
if (dma_q->is_partial_line) {
/* Handle the case of a partial line */
sav_eav = dma_q->last_sav;
} else {
/* Check for a SAV/EAV overlapping
the buffer boundary */
sav_eav =
cx231xx_find_boundary_SAV_EAV(p_buffer,
dma_q->partial_buf,
&bytes_parsed);
}
sav_eav &= 0xF0;
/* Get the first line if we have some portion of an SAV/EAV from
the last buffer or a partial line */
if (sav_eav) {
bytes_parsed += cx231xx_get_video_line(dev, dma_q,
sav_eav, /* SAV/EAV */
p_buffer + bytes_parsed, /* p_buffer */
buffer_size - bytes_parsed);/* buf size */
}
/* Now parse data that is completely in this buffer */
/* dma_q->is_partial_line = 0; */
while (bytes_parsed < buffer_size) {
u32 bytes_used = 0;
sav_eav = cx231xx_find_next_SAV_EAV(
p_buffer + bytes_parsed, /* p_buffer */
buffer_size - bytes_parsed, /* buf size */
&bytes_used);/* bytes used to get SAV/EAV */
bytes_parsed += bytes_used;
sav_eav &= 0xF0;
if (sav_eav && (bytes_parsed < buffer_size)) {
bytes_parsed += cx231xx_get_video_line(dev,
dma_q, sav_eav, /* SAV/EAV */
p_buffer + bytes_parsed,/* p_buffer */
buffer_size - bytes_parsed);/*buf size*/
}
}
/* Save the last four bytes of the buffer so we can check the
buffer boundary condition next time */
memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
bytes_parsed = 0;
}
return rc;
}
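/*
 * Check whether an SAV/EAV code straddles the boundary between the previous
 * URB buffer (whose last four bytes are kept in partial_buf) and the current
 * one. On success *p_bytes_used is set to the number of bytes of the new
 * buffer consumed by the code.
 */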
u8 cx231xx_find_boundary_SAV_EAV(u8 *p_buffer, u8 *partial_buf,
u32 *p_bytes_used)
{
u32 bytes_used;
u8 boundary_bytes[8];
u8 sav_eav = 0;
*p_bytes_used = 0;
/* Create an array of the last 4 bytes of the last buffer and the first
4 bytes of the current buffer. */
memcpy(boundary_bytes, partial_buf, 4);
memcpy(boundary_bytes + 4, p_buffer, 4);
/* Check for the SAV/EAV in the boundary buffer */
sav_eav = cx231xx_find_next_SAV_EAV((u8 *)&boundary_bytes, 8,
&bytes_used);
if (sav_eav) {
/* found a boundary SAV/EAV. Updates the bytes used to reflect
only those used in the new buffer */
*p_bytes_used = bytes_used - 4;
}
return sav_eav;
}
u8 cx231xx_find_next_SAV_EAV(u8 *p_buffer, u32 buffer_size, u32 *p_bytes_used)
{
u32 i;
u8 sav_eav = 0;
/*
* Don't search if the buffer size is less than 4. It causes a page
* fault since buffer_size - 4 evaluates to a large number in that
* case.
*/
if (buffer_size < 4) {
*p_bytes_used = buffer_size;
return 0;
}
for (i = 0; i < (buffer_size - 3); i++) {
if ((p_buffer[i] == 0xFF) &&
(p_buffer[i + 1] == 0x00) && (p_buffer[i + 2] == 0x00)) {
*p_bytes_used = i + 4;
sav_eav = p_buffer[i + 3];
return sav_eav;
}
}
*p_bytes_used = buffer_size;
return 0;
}
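/*
 * Process a single video line identified by its SAV code: select field 1 or
 * field 2, drop skipped lines whose SAV is immediately followed by an EAV,
 * and hand the payload to cx231xx_copy_video_line().
 */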
u32 cx231xx_get_video_line(struct cx231xx *dev,
struct cx231xx_dmaqueue *dma_q, u8 sav_eav,
u8 *p_buffer, u32 buffer_size)
{
u32 bytes_copied = 0;
int current_field = -1;
switch (sav_eav) {
case SAV_ACTIVE_VIDEO_FIELD1:
/* looking for a skipped line, which occurs in PAL 720x480 mode.
In this case, there will be no active data contained
between the SAV and EAV */
if ((buffer_size > 3) && (p_buffer[0] == 0xFF) &&
(p_buffer[1] == 0x00) && (p_buffer[2] == 0x00) &&
((p_buffer[3] == EAV_ACTIVE_VIDEO_FIELD1) ||
(p_buffer[3] == EAV_ACTIVE_VIDEO_FIELD2) ||
(p_buffer[3] == EAV_VBLANK_FIELD1) ||
(p_buffer[3] == EAV_VBLANK_FIELD2)))
return bytes_copied;
current_field = 1;
break;
case SAV_ACTIVE_VIDEO_FIELD2:
/* looking for a skipped line, which occurs in PAL 720x480 mode.
In this case, there will be no active data contained between
the SAV and EAV */
if ((buffer_size > 3) && (p_buffer[0] == 0xFF) &&
(p_buffer[1] == 0x00) && (p_buffer[2] == 0x00) &&
((p_buffer[3] == EAV_ACTIVE_VIDEO_FIELD1) ||
(p_buffer[3] == EAV_ACTIVE_VIDEO_FIELD2) ||
(p_buffer[3] == EAV_VBLANK_FIELD1) ||
(p_buffer[3] == EAV_VBLANK_FIELD2)))
return bytes_copied;
current_field = 2;
break;
}
dma_q->last_sav = sav_eav;
bytes_copied = cx231xx_copy_video_line(dev, dma_q, p_buffer,
buffer_size, current_field);
return bytes_copied;
}
u32 cx231xx_copy_video_line(struct cx231xx *dev,
struct cx231xx_dmaqueue *dma_q, u8 *p_line,
u32 length, int field_number)
{
u32 bytes_to_copy;
struct cx231xx_buffer *buf;
u32 _line_size = dev->width * 2;
if (dma_q->current_field != field_number)
cx231xx_reset_video_buffer(dev, dma_q);
/* get the buffer pointer */
buf = dev->video_mode.isoc_ctl.buf;
/* Remember the field number for next time */
dma_q->current_field = field_number;
bytes_to_copy = dma_q->bytes_left_in_line;
if (bytes_to_copy > length)
bytes_to_copy = length;
if (dma_q->lines_completed >= dma_q->lines_per_field) {
dma_q->bytes_left_in_line -= bytes_to_copy;
dma_q->is_partial_line = (dma_q->bytes_left_in_line == 0) ?
0 : 1;
return 0;
}
dma_q->is_partial_line = 1;
/* If we don't have a buffer, just return the number of bytes we would
have copied if we had a buffer. */
if (!buf) {
dma_q->bytes_left_in_line -= bytes_to_copy;
dma_q->is_partial_line = (dma_q->bytes_left_in_line == 0)
? 0 : 1;
return bytes_to_copy;
}
/* copy the data to video buffer */
cx231xx_do_copy(dev, dma_q, p_line, bytes_to_copy);
dma_q->pos += bytes_to_copy;
dma_q->bytes_left_in_line -= bytes_to_copy;
if (dma_q->bytes_left_in_line == 0) {
dma_q->bytes_left_in_line = _line_size;
dma_q->lines_completed++;
dma_q->is_partial_line = 0;
if (cx231xx_is_buffer_done(dev, dma_q) && buf) {
buffer_filled(dev, dma_q, buf);
dma_q->pos = 0;
buf = NULL;
dma_q->lines_completed = 0;
}
}
return bytes_to_copy;
}
void cx231xx_reset_video_buffer(struct cx231xx *dev,
struct cx231xx_dmaqueue *dma_q)
{
struct cx231xx_buffer *buf;
/* handle the switch from field 1 to field 2 */
if (dma_q->current_field == 1) {
if (dma_q->lines_completed >= dma_q->lines_per_field)
dma_q->field1_done = 1;
else
dma_q->field1_done = 0;
}
buf = dev->video_mode.isoc_ctl.buf;
if (buf == NULL) {
u8 *outp = NULL;
/* first try to get the buffer */
get_next_buf(dma_q, &buf);
if (buf)
outp = videobuf_to_vmalloc(&buf->vb);
dma_q->pos = 0;
dma_q->field1_done = 0;
dma_q->current_field = -1;
}
/* reset the counters */
dma_q->bytes_left_in_line = dev->width << 1;
dma_q->lines_completed = 0;
}
int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
u8 *p_buffer, u32 bytes_to_copy)
{
u8 *p_out_buffer = NULL;
u32 current_line_bytes_copied = 0;
struct cx231xx_buffer *buf;
u32 _line_size = dev->width << 1;
void *startwrite;
int offset, lencopy;
buf = dev->video_mode.isoc_ctl.buf;
if (buf == NULL)
return -1;
p_out_buffer = videobuf_to_vmalloc(&buf->vb);
current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line;
/* Offset field 2 one line from the top of the buffer */
offset = (dma_q->current_field == 1) ? 0 : _line_size;
/* Offset for field 2 */
startwrite = p_out_buffer + offset;
/* lines already completed in the current field */
startwrite += (dma_q->lines_completed * _line_size * 2);
/* bytes already completed in the current line */
startwrite += current_line_bytes_copied;
lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
bytes_to_copy : dma_q->bytes_left_in_line;
if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + buf->vb.size))
return 0;
/* The below copies the UYVY data straight into video buffer */
cx231xx_swab((u16 *) p_buffer, (u16 *) startwrite, (u16) lencopy);
return 0;
}
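/* Byte-swap 16-bit words from 'from' into 'to'; 'len' is a byte count, so
 * len / 2 16-bit words are converted. */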
void cx231xx_swab(u16 *from, u16 *to, u16 len)
{
u16 i;
if (len <= 0)
return;
for (i = 0; i < len / 2; i++)
to[i] = (from[i] << 8) | (from[i] >> 8);
}
u8 cx231xx_is_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q)
{
u8 buffer_complete = 0;
/* Dual field stream */
buffer_complete = ((dma_q->current_field == 2) &&
(dma_q->lines_completed >= dma_q->lines_per_field) &&
dma_q->field1_done);
return buffer_complete;
}
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
static int
buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
{
struct cx231xx_fh *fh = vq->priv_data;
struct cx231xx *dev = fh->dev;
struct v4l2_frequency f;
*size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)>>3;
if (0 == *count)
*count = CX231XX_DEF_BUF;
if (*count < CX231XX_MIN_BUF)
*count = CX231XX_MIN_BUF;
/* Ask tuner to go to analog mode */
memset(&f, 0, sizeof(f));
f.frequency = dev->ctl_freq;
f.type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
call_all(dev, tuner, s_frequency, &f);
return 0;
}
/* This is called *without* dev->slock held; please keep it that way */
static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
{
struct cx231xx_fh *fh = vq->priv_data;
struct cx231xx *dev = fh->dev;
unsigned long flags = 0;
if (in_interrupt())
BUG();
/* We used to wait for the buffer to finish here, but this didn't work
because, as we were keeping the state as VIDEOBUF_QUEUED,
videobuf_queue_cancel marked it as finished for us.
(Also, it could wedge forever if the hardware was misconfigured.)
This should be safe; by the time we get here, the buffer isn't
queued anymore. If we ever start marking the buffers as
VIDEOBUF_ACTIVE, it won't be, though.
*/
spin_lock_irqsave(&dev->video_mode.slock, flags);
if (dev->video_mode.isoc_ctl.buf == buf)
dev->video_mode.isoc_ctl.buf = NULL;
spin_unlock_irqrestore(&dev->video_mode.slock, flags);
videobuf_vmalloc_free(&buf->vb);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
static int
buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct cx231xx_fh *fh = vq->priv_data;
struct cx231xx_buffer *buf =
container_of(vb, struct cx231xx_buffer, vb);
struct cx231xx *dev = fh->dev;
int rc = 0, urb_init = 0;
/* The only currently supported format is 16 bits/pixel */
buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth
+ 7) >> 3;
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
buf->vb.width = dev->width;
buf->vb.height = dev->height;
buf->vb.field = field;
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
goto fail;
}
if (!dev->video_mode.isoc_ctl.num_bufs)
urb_init = 1;
if (urb_init) {
rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
CX231XX_NUM_BUFS,
dev->video_mode.max_pkt_size,
cx231xx_isoc_copy);
if (rc < 0)
goto fail;
}
buf->vb.state = VIDEOBUF_PREPARED;
return 0;
fail:
free_buffer(vq, buf);
return rc;
}
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
struct cx231xx_buffer *buf =
container_of(vb, struct cx231xx_buffer, vb);
struct cx231xx_fh *fh = vq->priv_data;
struct cx231xx *dev = fh->dev;
struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
buf->vb.state = VIDEOBUF_QUEUED;
list_add_tail(&buf->vb.queue, &vidq->active);
}
static void buffer_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct cx231xx_buffer *buf =
container_of(vb, struct cx231xx_buffer, vb);
struct cx231xx_fh *fh = vq->priv_data;
struct cx231xx *dev = (struct cx231xx *)fh->dev;
cx231xx_isocdbg("cx231xx: called buffer_release\n");
free_buffer(vq, buf);
}
static struct videobuf_queue_ops cx231xx_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.buf_release = buffer_release,
};
/********************* v4l2 interface **************************************/
void video_mux(struct cx231xx *dev, int index)
{
dev->video_input = index;
dev->ctl_ainput = INPUT(index)->amux;
cx231xx_set_video_input_mux(dev, index);
cx25840_call(dev, video, s_routing, INPUT(index)->vmux, 0, 0);
cx231xx_set_audio_input(dev, dev->ctl_ainput);
cx231xx_info("video_mux : %d\n", index);
/* do mode control overrides if required */
cx231xx_do_mode_ctrl_overrides(dev);
}
/* Usage lock check functions */
static int res_get(struct cx231xx_fh *fh)
{
struct cx231xx *dev = fh->dev;
int rc = 0;
/* This instance already has stream_on */
if (fh->stream_on)
return rc;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
if (dev->stream_on)
return -EBUSY;
dev->stream_on = 1;
} else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
if (dev->vbi_stream_on)
return -EBUSY;
dev->vbi_stream_on = 1;
} else
return -EINVAL;
fh->stream_on = 1;
return rc;
}
static int res_check(struct cx231xx_fh *fh)
{
return fh->stream_on;
}
static void res_free(struct cx231xx_fh *fh)
{
struct cx231xx *dev = fh->dev;
fh->stream_on = 0;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
dev->stream_on = 0;
if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
dev->vbi_stream_on = 0;
}
static int check_dev(struct cx231xx *dev)
{
if (dev->state & DEV_DISCONNECTED) {
cx231xx_errdev("v4l2 ioctl: device not present\n");
return -ENODEV;
}
if (dev->state & DEV_MISCONFIGURED) {
cx231xx_errdev("v4l2 ioctl: device is misconfigured; "
"close and open it again\n");
return -EIO;
}
return 0;
}
static void get_scale(struct cx231xx *dev,
unsigned int width, unsigned int height,
unsigned int *hscale, unsigned int *vscale)
{
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
*hscale = (((unsigned long)maxw) << 12) / width - 4096L;
if (*hscale >= 0x4000)
*hscale = 0x3fff;
*vscale = (((unsigned long)maxh) << 12) / height - 4096L;
if (*vscale >= 0x4000)
*vscale = 0x3fff;
}
/* ------------------------------------------------------------------
IOCTL vidioc handling
------------------------------------------------------------------*/
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
mutex_lock(&dev->lock);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.pixelformat = dev->format->fourcc;
f->fmt.pix.bytesperline = (dev->width * dev->format->depth + 7) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * dev->height;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
mutex_unlock(&dev->lock);
return 0;
}
static struct cx231xx_fmt *format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(format); i++)
if (format[i].fourcc == fourcc)
return &format[i];
return NULL;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
unsigned int width = f->fmt.pix.width;
unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
unsigned int hscale, vscale;
struct cx231xx_fmt *fmt;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (!fmt) {
cx231xx_videodbg("Fourcc format (%08x) invalid.\n",
f->fmt.pix.pixelformat);
return -EINVAL;
}
/* width must be even because of the YUYV format
height must be even because of interlacing */
v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
get_scale(dev, width, height, &hscale, &vscale);
width = (((unsigned long)maxw) << 12) / (hscale + 4096L);
height = (((unsigned long)maxh) << 12) / (vscale + 4096L);
f->fmt.pix.width = width;
f->fmt.pix.height = height;
f->fmt.pix.pixelformat = fmt->fourcc;
f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
return 0;
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
struct cx231xx_fmt *fmt;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
vidioc_try_fmt_vid_cap(file, priv, f);
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (!fmt) {
rc = -EINVAL;
goto out;
}
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
cx231xx_errdev("%s queue busy\n", __func__);
rc = -EBUSY;
goto out;
}
if (dev->stream_on && !fh->stream_on) {
cx231xx_errdev("%s device in use by another fh\n", __func__);
rc = -EBUSY;
goto out;
}
/* set new image size */
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
dev->format = fmt;
get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
call_all(dev, video, s_fmt, f);
/* Set the correct alternate setting for this resolution */
cx231xx_resolution_set(dev);
out:
mutex_unlock(&dev->lock);
return rc;
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id * id)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
*id = dev->norm;
return 0;
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
struct v4l2_format f;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
cx231xx_info("vidioc_s_std : 0x%x\n", (unsigned int)*norm);
mutex_lock(&dev->lock);
dev->norm = *norm;
/* Adjusts width/height, if needed */
f.fmt.pix.width = dev->width;
f.fmt.pix.height = dev->height;
vidioc_try_fmt_vid_cap(file, priv, &f);
/* set new image size */
dev->width = f.fmt.pix.width;
dev->height = f.fmt.pix.height;
get_scale(dev, dev->width, dev->height, &dev->hscale, &dev->vscale);
call_all(dev, core, s_std, dev->norm);
mutex_unlock(&dev->lock);
cx231xx_resolution_set(dev);
/* do mode control overrides */
cx231xx_do_mode_ctrl_overrides(dev);
return 0;
}
static const char *iname[] = {
[CX231XX_VMUX_COMPOSITE1] = "Composite1",
[CX231XX_VMUX_SVIDEO] = "S-Video",
[CX231XX_VMUX_TELEVISION] = "Television",
[CX231XX_VMUX_CABLE] = "Cable TV",
[CX231XX_VMUX_DVB] = "DVB",
[CX231XX_VMUX_DEBUG] = "for debug only",
};
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
unsigned int n;
n = i->index;
if (n >= MAX_CX231XX_INPUT)
return -EINVAL;
if (0 == INPUT(n)->type)
return -EINVAL;
i->index = n;
i->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name, iname[INPUT(n)->type]);
if ((CX231XX_VMUX_TELEVISION == INPUT(n)->type) ||
(CX231XX_VMUX_CABLE == INPUT(n)->type))
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = dev->vdev->tvnorms;
return 0;
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
*i = dev->video_input;
return 0;
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
if (i >= MAX_CX231XX_INPUT)
return -EINVAL;
if (0 == INPUT(i)->type)
return -EINVAL;
mutex_lock(&dev->lock);
video_mux(dev, i);
mutex_unlock(&dev->lock);
return 0;
}
static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
switch (a->index) {
case CX231XX_AMUX_VIDEO:
strcpy(a->name, "Television");
break;
case CX231XX_AMUX_LINE_IN:
strcpy(a->name, "Line In");
break;
default:
return -EINVAL;
}
a->index = dev->ctl_ainput;
a->capability = V4L2_AUDCAP_STEREO;
return 0;
}
static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int status = 0;
/* Doesn't allow manual routing */
if (a->index != dev->ctl_ainput)
return -EINVAL;
dev->ctl_ainput = INPUT(a->index)->amux;
status = cx231xx_set_audio_input(dev, dev->ctl_ainput);
return status;
}
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int id = qc->id;
int i;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
qc->id = v4l2_ctrl_next(ctrl_classes, qc->id);
if (unlikely(qc->id == 0))
return -EINVAL;
memset(qc, 0, sizeof(*qc));
qc->id = id;
if (qc->id < V4L2_CID_BASE || qc->id >= V4L2_CID_LASTP1)
return -EINVAL;
for (i = 0; i < CX231XX_CTLS; i++)
if (cx231xx_ctls[i].v.id == qc->id)
break;
if (i == CX231XX_CTLS) {
*qc = no_ctl;
return 0;
}
*qc = cx231xx_ctls[i].v;
mutex_lock(&dev->lock);
call_all(dev, core, queryctrl, qc);
mutex_unlock(&dev->lock);
if (qc->type)
return 0;
else
return -EINVAL;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
call_all(dev, core, g_ctrl, ctrl);
mutex_unlock(&dev->lock);
return rc;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
call_all(dev, core, s_ctrl, ctrl);
mutex_unlock(&dev->lock);
return rc;
}
static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
if (0 != t->index)
return -EINVAL;
strcpy(t->name, "Tuner");
t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM;
t->rangehigh = 0xffffffffUL;
t->signal = 0xffff; /* LOCKED */
return 0;
}
static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
if (0 != t->index)
return -EINVAL;
#if 0
mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
mutex_unlock(&dev->lock);
#endif
return 0;
}
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
mutex_lock(&dev->lock);
f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = dev->ctl_freq;
call_all(dev, tuner, g_frequency, f);
mutex_unlock(&dev->lock);
return 0;
}
static int vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
if (0 != f->tuner)
return -EINVAL;
if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV))
return -EINVAL;
if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
return -EINVAL;
/* set pre channel change settings in DIF first */
rc = cx231xx_tuner_pre_channel_change(dev);
mutex_lock(&dev->lock);
dev->ctl_freq = f->frequency;
if (dev->tuner_type == TUNER_XC5000) {
if (dev->cx231xx_set_analog_freq != NULL)
dev->cx231xx_set_analog_freq(dev, f->frequency);
} else
call_all(dev, tuner, s_frequency, f);
mutex_unlock(&dev->lock);
/* set post channel change settings in DIF first */
rc = cx231xx_tuner_post_channel_change(dev);
cx231xx_info("Set New FREQUENCY to %d\n", f->frequency);
return rc;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
-R, --list-registers=type=<host/i2cdrv/i2caddr>,
chip=<chip>[,min=<addr>,max=<addr>]
dump registers from <min> to <max> [VIDIOC_DBG_G_REGISTER]
-r, --set-register=type=<host/i2cdrv/i2caddr>,
chip=<chip>,reg=<addr>,val=<val>
set the register [VIDIOC_DBG_S_REGISTER]
if type == host, then <chip> is the host's chip ID (default 0)
if type == i2cdrv (default), then <chip> is the I2C driver name or ID
if type == i2caddr, then <chip> is the 7-bit I2C address
*/
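/*
 * Hypothetical invocations of the v4l2-dbg userspace tool following the
 * syntax documented above (device node, register address and value are
 * illustrative only):
 *   v4l2-dbg -d /dev/video0 --list-registers=type=host,chip=0,min=0,max=0x10
 *   v4l2-dbg -d /dev/video0 --set-register=type=host,chip=0,reg=0x04,val=0x01
 */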
static int vidioc_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int ret = 0;
u8 value[4] = { 0, 0, 0, 0 };
u32 data = 0;
switch (reg->match.type) {
case V4L2_CHIP_MATCH_HOST:
switch (reg->match.addr) {
case 0: /* Cx231xx - internal registers */
ret = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
(u16)reg->reg, value, 4);
reg->val = value[0] | value[1] << 8 |
value[2] << 16 | value[3] << 24;
break;
case 1: /* AFE - read byte */
ret = cx231xx_read_i2c_data(dev, AFE_DEVICE_ADDRESS,
(u16)reg->reg, 2, &data, 1);
reg->val = le32_to_cpu(data & 0xff);
break;
case 14: /* AFE - read dword */
ret = cx231xx_read_i2c_data(dev, AFE_DEVICE_ADDRESS,
(u16)reg->reg, 2, &data, 4);
reg->val = le32_to_cpu(data);
break;
case 2: /* Video Block - read byte */
ret = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS,
(u16)reg->reg, 2, &data, 1);
reg->val = le32_to_cpu(data & 0xff);
break;
case 24: /* Video Block - read dword */
ret = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS,
(u16)reg->reg, 2, &data, 4);
reg->val = le32_to_cpu(data);
break;
case 3: /* I2S block - read byte */
ret = cx231xx_read_i2c_data(dev,
I2S_BLK_DEVICE_ADDRESS,
(u16)reg->reg, 1,
&data, 1);
reg->val = le32_to_cpu(data & 0xff);
break;
case 34: /* I2S Block - read dword */
ret =
cx231xx_read_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS,
(u16)reg->reg, 1, &data, 4);
reg->val = le32_to_cpu(data);
break;
}
return ret < 0 ? ret : 0;
case V4L2_CHIP_MATCH_I2C_DRIVER:
call_all(dev, core, g_register, reg);
return 0;
case V4L2_CHIP_MATCH_I2C_ADDR:
/* Not supported yet */
return -EINVAL;
default:
if (!v4l2_chip_match_host(&reg->match))
return -EINVAL;
}
mutex_lock(&dev->lock);
call_all(dev, core, g_register, reg);
mutex_unlock(&dev->lock);
return ret;
}
static int vidioc_s_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int ret = 0;
__le64 buf;
u32 value;
u8 data[4] = { 0, 0, 0, 0 };
buf = cpu_to_le64(reg->val);
switch (reg->match.type) {
case V4L2_CHIP_MATCH_HOST:
{
value = (u32) buf & 0xffffffff;
switch (reg->match.addr) {
case 0: /* cx231xx internal registers */
data[0] = (u8) value;
data[1] = (u8) (value >> 8);
data[2] = (u8) (value >> 16);
data[3] = (u8) (value >> 24);
ret = cx231xx_write_ctrl_reg(dev,
VRT_SET_REGISTER,
(u16)reg->reg, data,
4);
break;
case 1: /* AFE - write byte */
ret = cx231xx_write_i2c_data(dev,
AFE_DEVICE_ADDRESS,
(u16)reg->reg, 2,
value, 1);
break;
case 14: /* AFE - write dword */
ret = cx231xx_write_i2c_data(dev,
AFE_DEVICE_ADDRESS,
(u16)reg->reg, 2,
value, 4);
break;
case 2: /* Video Block - write byte */
ret =
cx231xx_write_i2c_data(dev,
VID_BLK_I2C_ADDRESS,
(u16)reg->reg, 2,
value, 1);
break;
case 24: /* Video Block - write dword */
ret =
cx231xx_write_i2c_data(dev,
VID_BLK_I2C_ADDRESS,
(u16)reg->reg, 2,
value, 4);
break;
case 3: /* I2S block - write byte */
ret =
cx231xx_write_i2c_data(dev,
I2S_BLK_DEVICE_ADDRESS,
(u16)reg->reg, 1,
value, 1);
break;
case 34: /* I2S block - write dword */
ret =
cx231xx_write_i2c_data(dev,
I2S_BLK_DEVICE_ADDRESS,
(u16)reg->reg, 1,
value, 4);
break;
}
}
return ret < 0 ? ret : 0;
default:
break;
}
mutex_lock(&dev->lock);
call_all(dev, core, s_register, reg);
mutex_unlock(&dev->lock);
return ret;
}
#endif
static int vidioc_cropcap(struct file *file, void *priv,
struct v4l2_cropcap *cc)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
cc->bounds.left = 0;
cc->bounds.top = 0;
cc->bounds.width = dev->width;
cc->bounds.height = dev->height;
cc->defrect = cc->bounds;
cc->pixelaspect.numerator = 54; /* 4:3 FIXME: remove magic numbers */
cc->pixelaspect.denominator = 59;
return 0;
}
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
rc = res_get(fh);
if (likely(rc >= 0))
rc = videobuf_streamon(&fh->vb_vidq);
call_all(dev, video, s_stream, 1);
mutex_unlock(&dev->lock);
return rc;
}
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
return -EINVAL;
if (type != fh->type)
return -EINVAL;
mutex_lock(&dev->lock);
cx25840_call(dev, video, s_stream, 0);
videobuf_streamoff(&fh->vb_vidq);
res_free(fh);
mutex_unlock(&dev->lock);
return 0;
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
strlcpy(cap->driver, "cx231xx", sizeof(cap->driver));
strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->version = CX231XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_VBI_CAPTURE |
#if 0
V4L2_CAP_SLICED_VBI_CAPTURE |
#endif
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_AUDIO |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
if (dev->tuner_type != TUNER_ABSENT)
cap->capabilities |= V4L2_CAP_TUNER;
return 0;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(format)))
return -EINVAL;
strlcpy(f->description, format[f->index].name, sizeof(f->description));
f->pixelformat = format[f->index].fourcc;
return 0;
}
/* Sliced VBI ioctls */
static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
f->fmt.sliced.service_set = 0;
call_all(dev, video, g_fmt, f);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
mutex_unlock(&dev->lock);
return rc;
}
static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
call_all(dev, video, g_fmt, f);
mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
return -EINVAL;
return 0;
}
/* RAW VBI ioctls */
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
f->fmt.vbi.sampling_rate = (dev->norm & V4L2_STD_625_50) ?
35468950 : 28636363;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
f->fmt.vbi.offset = 64 * 4;
f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE : NTSC_VBI_START_LINE;
f->fmt.vbi.count[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES;
f->fmt.vbi.start[1] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE + 312 : NTSC_VBI_START_LINE + 263;
f->fmt.vbi.count[1] = f->fmt.vbi.count[0];
return 0;
}
static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
if (dev->vbi_stream_on && !fh->stream_on) {
cx231xx_errdev("%s device in use by another fh\n", __func__);
return -EBUSY;
}
f->type = V4L2_BUF_TYPE_VBI_CAPTURE;
f->fmt.vbi.sampling_rate = (dev->norm & V4L2_STD_625_50) ?
35468950 : 28636363;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
f->fmt.vbi.offset = 244;
f->fmt.vbi.flags = 0;
f->fmt.vbi.start[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE : NTSC_VBI_START_LINE;
f->fmt.vbi.count[0] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES;
f->fmt.vbi.start[1] = (dev->norm & V4L2_STD_625_50) ?
PAL_VBI_START_LINE + 312 : NTSC_VBI_START_LINE + 263;
f->fmt.vbi.count[1] = f->fmt.vbi.count[0];
return 0;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
return videobuf_reqbufs(&fh->vb_vidq, rb);
}
static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
return videobuf_querybuf(&fh->vb_vidq, b);
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
return videobuf_qbuf(&fh->vb_vidq, b);
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
{
struct cx231xx_fh *fh = priv;
return videobuf_cgmbuf(&fh->vb_vidq, mbuf, 8);
}
#endif
/* ----------------------------------------------------------- */
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
static int radio_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
strlcpy(cap->driver, "cx231xx", sizeof(cap->driver));
strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->version = CX231XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
if (unlikely(t->index > 0))
return -EINVAL;
strcpy(t->name, "Radio");
t->type = V4L2_TUNER_RADIO;
mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
mutex_unlock(&dev->lock);
return 0;
}
static int radio_enum_input(struct file *file, void *priv, struct v4l2_input *i)
{
if (i->index != 0)
return -EINVAL;
strcpy(i->name, "Radio");
i->type = V4L2_INPUT_TYPE_TUNER;
return 0;
}
static int radio_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
if (unlikely(a->index))
return -EINVAL;
strcpy(a->name, "Radio");
return 0;
}
static int radio_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
if (0 != t->index)
return -EINVAL;
mutex_lock(&dev->lock);
call_all(dev, tuner, s_tuner, t);
mutex_unlock(&dev->lock);
return 0;
}
static int radio_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
return 0;
}
static int radio_s_input(struct file *file, void *fh, unsigned int i)
{
return 0;
}
static int radio_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *c)
{
int i;
if (c->id < V4L2_CID_BASE || c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
for (i = 0; i < CX231XX_CTLS; i++)
if (cx231xx_ctls[i].v.id == c->id)
break;
*c = cx231xx_ctls[i].v;
} else
*c = no_ctl;
return 0;
}
/*
* cx231xx_v4l2_open()
* inits the device and starts isoc transfer
*/
static int cx231xx_v4l2_open(struct file *filp)
{
int minor = video_devdata(filp)->minor;
int errCode = 0, radio = 0;
struct cx231xx *dev = NULL;
struct cx231xx_fh *fh;
enum v4l2_buf_type fh_type = 0;
dev = cx231xx_get_device(minor, &fh_type, &radio);
if (NULL == dev)
return -ENODEV;
mutex_lock(&dev->lock);
cx231xx_videodbg("open minor=%d type=%s users=%d\n",
minor, v4l2_type_names[fh_type], dev->users);
#if 0
errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
if (errCode < 0) {
cx231xx_errdev
("Device locked on digital mode. Can't open analog\n");
mutex_unlock(&dev->lock);
return -EBUSY;
}
#endif
fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
if (!fh) {
cx231xx_errdev("cx231xx-video.c: Out of memory?!\n");
mutex_unlock(&dev->lock);
return -ENOMEM;
}
fh->dev = dev;
fh->radio = radio;
fh->type = fh_type;
filp->private_data = fh;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
dev->width = norm_maxw(dev);
dev->height = norm_maxh(dev);
dev->hscale = 0;
dev->vscale = 0;
/* Power up in Analog TV mode */
cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
#if 0
cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
#endif
cx231xx_resolution_set(dev);
/* set video alternate setting */
cx231xx_set_video_alternate(dev);
/* Needed, since GPIO might have disabled power of
some i2c device */
cx231xx_config_i2c(dev);
/* device needs to be initialized before isoc transfer */
dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
video_mux(dev, dev->video_input);
}
if (fh->radio) {
cx231xx_videodbg("video_open: setting radio device\n");
/* cx231xx_start_radio(dev); */
call_all(dev, tuner, s_radio);
}
dev->users++;
if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_video_qops,
NULL, &dev->video_mode.slock,
fh->type, V4L2_FIELD_INTERLACED,
sizeof(struct cx231xx_buffer), fh);
if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
/* Set the required alternate setting; the VBI interface works in
bulk mode only */
cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_vbi_qops,
NULL, &dev->vbi_mode.slock,
fh->type, V4L2_FIELD_SEQ_TB,
sizeof(struct cx231xx_buffer), fh);
}
mutex_unlock(&dev->lock);
return errCode;
}
/*
* cx231xx_release_analog_resources()
* unregisters the analog v4l2 devices (video, vbi and radio)
* called when the device gets disconnected or at module unload
*/
void cx231xx_release_analog_resources(struct cx231xx *dev)
{
/*FIXME: I2C IR should be disconnected */
if (dev->radio_dev) {
if (-1 != dev->radio_dev->minor)
video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
dev->radio_dev = NULL;
}
if (dev->vbi_dev) {
cx231xx_info("V4L2 device /dev/vbi%d deregistered\n",
dev->vbi_dev->num);
if (-1 != dev->vbi_dev->minor)
video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->vdev) {
cx231xx_info("V4L2 device /dev/video%d deregistered\n",
dev->vdev->num);
if (-1 != dev->vdev->minor)
video_unregister_device(dev->vdev);
else
video_device_release(dev->vdev);
dev->vdev = NULL;
}
}
/*
* cx231xx_v4l2_close()
* stops streaming and deallocates all resources allocated by the v4l2
* calls and ioctls
*/
static int cx231xx_v4l2_close(struct file *filp)
{
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
cx231xx_videodbg("users=%d\n", dev->users);
mutex_lock(&dev->lock);
if (res_check(fh))
res_free(fh);
if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
videobuf_stop(&fh->vb_vidq);
videobuf_mmap_free(&fh->vb_vidq);
/* the device is already disconnected;
free the remaining resources */
if (dev->state & DEV_DISCONNECTED) {
cx231xx_release_resources(dev);
mutex_unlock(&dev->lock);
kfree(dev);
return 0;
}
/* do this before setting alternate! */
cx231xx_uninit_vbi_isoc(dev);
/* set alternate 0 */
if (!dev->vbi_or_sliced_cc_mode)
cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
else
cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
kfree(fh);
dev->users--;
wake_up_interruptible_nr(&dev->open, 1);
mutex_unlock(&dev->lock);
return 0;
}
if (dev->users == 1) {
videobuf_stop(&fh->vb_vidq);
videobuf_mmap_free(&fh->vb_vidq);
/* the device is already disconnected;
free the remaining resources */
if (dev->state & DEV_DISCONNECTED) {
cx231xx_release_resources(dev);
mutex_unlock(&dev->lock);
kfree(dev);
return 0;
}
/* Save some power by putting tuner to sleep */
call_all(dev, core, s_power, 0);
/* do this before setting alternate! */
cx231xx_uninit_isoc(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
/* set alternate 0 */
cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0);
}
kfree(fh);
dev->users--;
wake_up_interruptible_nr(&dev->open, 1);
mutex_unlock(&dev->lock);
return 0;
}
/*
* cx231xx_v4l2_read()
* will allocate buffers when called for the first time
*/
static ssize_t
cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
loff_t *pos)
{
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
if ((fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
(fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)) {
mutex_lock(&dev->lock);
rc = res_get(fh);
mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return rc;
return videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
filp->f_flags & O_NONBLOCK);
}
return 0;
}
/*
* cx231xx_v4l2_poll()
* will allocate buffers when called for the first time
*/
static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table * wait)
{
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
rc = res_get(fh);
mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return POLLERR;
if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) ||
(V4L2_BUF_TYPE_VBI_CAPTURE == fh->type))
return videobuf_poll_stream(filp, &fh->vb_vidq, wait);
else
return POLLERR;
}
/*
* cx231xx_v4l2_mmap()
*/
static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
int rc;
rc = check_dev(dev);
if (rc < 0)
return rc;
mutex_lock(&dev->lock);
rc = res_get(fh);
mutex_unlock(&dev->lock);
if (unlikely(rc < 0))
return rc;
rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
cx231xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end -
(unsigned long)vma->vm_start, rc);
return rc;
}
static const struct v4l2_file_operations cx231xx_v4l_fops = {
.owner = THIS_MODULE,
.open = cx231xx_v4l2_open,
.release = cx231xx_v4l2_close,
.read = cx231xx_v4l2_read,
.poll = cx231xx_v4l2_poll,
.mmap = cx231xx_v4l2_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_try_fmt_vbi_cap = vidioc_try_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = vidioc_try_fmt_vbi_cap,
.vidioc_g_audio = vidioc_g_audio,
.vidioc_s_audio = vidioc_s_audio,
.vidioc_cropcap = vidioc_cropcap,
.vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap,
.vidioc_try_fmt_sliced_vbi_cap = vidioc_try_set_sliced_vbi_cap,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
#endif
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf = vidiocgmbuf,
#endif
};
static struct video_device cx231xx_vbi_template;
static const struct video_device cx231xx_video_template = {
.fops = &cx231xx_v4l_fops,
.release = video_device_release,
.ioctl_ops = &video_ioctl_ops,
.minor = -1,
.tvnorms = V4L2_STD_ALL,
.current_norm = V4L2_STD_PAL,
};
static const struct v4l2_file_operations radio_fops = {
.owner = THIS_MODULE,
.open = cx231xx_v4l2_open,
.release = cx231xx_v4l2_close,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
.vidioc_querycap = radio_querycap,
.vidioc_g_tuner = radio_g_tuner,
.vidioc_enum_input = radio_enum_input,
.vidioc_g_audio = radio_g_audio,
.vidioc_s_tuner = radio_s_tuner,
.vidioc_s_audio = radio_s_audio,
.vidioc_s_input = radio_s_input,
.vidioc_queryctrl = radio_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
#endif
};
static struct video_device cx231xx_radio_template = {
.name = "cx231xx-radio",
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
.minor = -1,
};
/******************************** usb interface ******************************/
static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
const struct video_device
*template, const char *type_name)
{
struct video_device *vfd;
vfd = video_device_alloc();
if (NULL == vfd)
return NULL;
*vfd = *template;
vfd->minor = -1;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
return vfd;
}
int cx231xx_register_analog_devices(struct cx231xx *dev)
{
int ret;
cx231xx_info("%s: v4l2 driver version %d.%d.%d\n",
dev->name,
(CX231XX_VERSION_CODE >> 16) & 0xff,
(CX231XX_VERSION_CODE >> 8) & 0xff,
CX231XX_VERSION_CODE & 0xff);
/* set default norm */
/*dev->norm = cx231xx_video_template.current_norm; */
dev->width = norm_maxw(dev);
dev->height = norm_maxh(dev);
dev->interlaced = 0;
dev->hscale = 0;
dev->vscale = 0;
/* Analog specific initialization */
dev->format = &format[0];
/* video_mux(dev, dev->video_input); */
/* Audio defaults */
dev->mute = 1;
dev->volume = 0x1f;
/* enable vbi capturing */
/* write code here... */
/* allocate and fill video video_device struct */
dev->vdev = cx231xx_vdev_init(dev, &cx231xx_video_template, "video");
if (!dev->vdev) {
cx231xx_errdev("cannot allocate video_device.\n");
return -ENODEV;
}
/* register v4l2 video video_device */
ret = video_register_device(dev->vdev, VFL_TYPE_GRABBER,
video_nr[dev->devno]);
if (ret) {
cx231xx_errdev("unable to register video device (error=%i).\n",
ret);
return ret;
}
cx231xx_info("%s/0: registered device video%d [v4l2]\n",
dev->name, dev->vdev->num);
/* Initialize VBI template */
memcpy(&cx231xx_vbi_template, &cx231xx_video_template,
sizeof(cx231xx_vbi_template));
strcpy(cx231xx_vbi_template.name, "cx231xx-vbi");
/* Allocate and fill vbi video_device struct */
dev->vbi_dev = cx231xx_vdev_init(dev, &cx231xx_vbi_template, "vbi");
/* register v4l2 vbi video_device */
ret = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->devno]);
if (ret < 0) {
cx231xx_errdev("unable to register vbi device\n");
return ret;
}
cx231xx_info("%s/0: registered device vbi%d\n",
dev->name, dev->vbi_dev->num);
if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) {
dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template,
"radio");
if (!dev->radio_dev) {
cx231xx_errdev("cannot allocate video_device.\n");
return -ENODEV;
}
ret = video_register_device(dev->radio_dev, VFL_TYPE_RADIO,
radio_nr[dev->devno]);
if (ret < 0) {
cx231xx_errdev("can't register radio device\n");
return ret;
}
cx231xx_info("Registered radio device as /dev/radio%d\n",
dev->radio_dev->num);
}
cx231xx_info("V4L2 device registered as /dev/video%d and /dev/vbi%d\n",
dev->vdev->num, dev->vbi_dev->num);
return 0;
}
| gpl-2.0 |
entrusc/linux-lcd | drivers/net/usb/qmi_wwan.c | 12 | 19911 | /*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
*
* The probing code is heavily inspired by cdc_ether, which is:
* Copyright (C) 2003-2005 by David Brownell
* Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
/* This driver supports wwan (3G/LTE/?) devices using a vendor
* specific management protocol called Qualcomm MSM Interface (QMI) -
* in addition to the more common AT commands over serial interface
* management
*
* QMI is wrapped in CDC, using CDC encapsulated commands on the
* control ("master") interface of a two-interface CDC Union
* resembling standard CDC ECM. The devices do not use the control
* interface for any other CDC messages. Most likely because the
* management protocol is used in place of the standard CDC
* notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
*
* Alternatively, control and data functions can be combined in a
* single USB interface.
*
* Handling a protocol like QMI is out of scope for any driver.
* It is exported as a character device using the cdc-wdm driver as
* a subdriver, enabling userspace applications ("modem managers") to
* handle it.
*
* These devices may alternatively/additionally be configured using AT
* commands on a serial interface
*/
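/*
 * Illustrative note (not part of the driver itself): a userspace modem
 * manager is expected to open the /dev/cdc-wdmN character device created by
 * the cdc-wdm subdriver and exchange QMI messages over it, while the network
 * interface registered by usbnet carries the actual IP traffic.
 */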
/* driver specific data */
struct qmi_wwan_state {
struct usb_driver *subdriver;
atomic_t pmcount;
unsigned long unused;
struct usb_interface *control;
struct usb_interface *data;
};
/* using a counter to merge subdriver requests with our own into a combined state */
static int qmi_wwan_manage_power(struct usbnet *dev, int on)
{
struct qmi_wwan_state *info = (void *)&dev->data;
int rv = 0;
dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
/* need autopm_get/put here to ensure the usbcore sees the new value */
rv = usb_autopm_get_interface(dev->intf);
if (rv < 0)
goto err;
dev->intf->needs_remote_wakeup = on;
usb_autopm_put_interface(dev->intf);
}
err:
return rv;
}
static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
{
struct usbnet *dev = usb_get_intfdata(intf);
/* can be called while disconnecting */
if (!dev)
return 0;
return qmi_wwan_manage_power(dev, on);
}
/* collect all three endpoints and register subdriver */
static int qmi_wwan_register_subdriver(struct usbnet *dev)
{
int rv;
struct usb_driver *subdriver = NULL;
struct qmi_wwan_state *info = (void *)&dev->data;
/* collect bulk endpoints */
rv = usbnet_get_endpoints(dev, info->data);
if (rv < 0)
goto err;
/* update status endpoint if separate control interface */
if (info->control != info->data)
dev->status = &info->control->cur_altsetting->endpoint[0];
/* require interrupt endpoint for subdriver */
if (!dev->status) {
rv = -EINVAL;
goto err;
}
/* for subdriver power management */
atomic_set(&info->pmcount, 0);
/* register subdriver */
subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
if (IS_ERR(subdriver)) {
dev_err(&info->control->dev, "subdriver registration failed\n");
rv = PTR_ERR(subdriver);
goto err;
}
/* prevent usbnet from using status endpoint */
dev->status = NULL;
/* save subdriver struct for suspend/resume wrappers */
info->subdriver = subdriver;
err:
return rv;
}
static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status = -1;
u8 *buf = intf->cur_altsetting->extra;
int len = intf->cur_altsetting->extralen;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
struct usb_cdc_union_desc *cdc_union = NULL;
struct usb_cdc_ether_desc *cdc_ether = NULL;
u32 found = 0;
struct usb_driver *driver = driver_of(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
/* require a single interrupt status endpoint for subdriver */
if (intf->cur_altsetting->desc.bNumEndpoints != 1)
goto err;
while (len > 3) {
struct usb_descriptor_header *h = (void *)buf;
/* ignore any misplaced descriptors */
if (h->bDescriptorType != USB_DT_CS_INTERFACE)
goto next_desc;
/* buf[2] is CDC descriptor subtype */
switch (buf[2]) {
case USB_CDC_HEADER_TYPE:
if (found & 1 << USB_CDC_HEADER_TYPE) {
dev_dbg(&intf->dev, "extra CDC header\n");
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
goto err;
}
break;
case USB_CDC_UNION_TYPE:
if (found & 1 << USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "extra CDC union\n");
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
goto err;
}
cdc_union = (struct usb_cdc_union_desc *)buf;
break;
case USB_CDC_ETHERNET_TYPE:
if (found & 1 << USB_CDC_ETHERNET_TYPE) {
dev_dbg(&intf->dev, "extra CDC ether\n");
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
dev_dbg(&intf->dev, "CDC ether len %u\n", h->bLength);
goto err;
}
cdc_ether = (struct usb_cdc_ether_desc *)buf;
break;
}
/*
* Remember which CDC functional descriptors we've seen. Works
* for all types we care about, of which USB_CDC_ETHERNET_TYPE
* (0x0f) is the highest numbered
*/
if (buf[2] < 32)
found |= 1 << buf[2];
next_desc:
len -= h->bLength;
buf += h->bLength;
}
/* did we find all the required ones? */
if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
!(found & (1 << USB_CDC_UNION_TYPE))) {
dev_err(&intf->dev, "CDC functional descriptors missing\n");
goto err;
}
/* verify CDC Union */
if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
goto err;
}
/* need to save these for unbind */
info->control = intf;
info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
if (!info->data) {
dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
goto err;
}
/* errors aren't fatal - we can live with the dynamic address */
if (cdc_ether) {
dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
/* claim data interface and set it up */
status = usb_driver_claim_interface(driver, info->data, dev);
if (status < 0)
goto err;
status = qmi_wwan_register_subdriver(dev);
if (status < 0) {
usb_set_intfdata(info->data, NULL);
usb_driver_release_interface(driver, info->data);
}
err:
return status;
}
/* Some devices combine the "control" and "data" functions into a
* single interface with all three endpoints: interrupt + bulk in and
* out
*/
static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
{
struct qmi_wwan_state *info = (void *)&dev->data;
/* control and data is shared */
info->control = intf;
info->data = intf;
return qmi_wwan_register_subdriver(dev);
}
static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct qmi_wwan_state *info = (void *)&dev->data;
struct usb_driver *driver = driver_of(intf);
struct usb_interface *other;
if (info->subdriver && info->subdriver->disconnect)
info->subdriver->disconnect(info->control);
/* allow user to unbind using either control or data */
if (intf == info->control)
other = info->data;
else
other = info->control;
/* only if not shared */
if (other && intf != other) {
usb_set_intfdata(other, NULL);
usb_driver_release_interface(driver, other);
}
info->subdriver = NULL;
info->data = NULL;
info->control = NULL;
}
/* suspend/resume wrappers calling both usbnet and the cdc-wdm
* subdriver if present.
*
* NOTE: cdc-wdm also supports pre/post_reset, but we cannot provide
* wrappers for those without adding usbnet reset support first.
*/
static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
int ret;
ret = usbnet_suspend(intf, message);
if (ret < 0)
goto err;
if (intf == info->control && info->subdriver && info->subdriver->suspend)
ret = info->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
err:
return ret;
}
static int qmi_wwan_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct qmi_wwan_state *info = (void *)&dev->data;
int ret = 0;
bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
if (callsub)
ret = info->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
if (ret < 0 && callsub && info->subdriver->suspend)
info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
}
static const struct driver_info qmi_wwan_info = {
.description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
static const struct driver_info qmi_wwan_shared = {
.description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
#define QMI_FIXED_INTF(vend, prod, num) \
USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
.driver_info = (unsigned long)&qmi_wwan_shared
/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
#define QMI_GOBI1K_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 3)
/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */
#define QMI_GOBI_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 0)
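/* For illustration, an entry in the table below such as
 *	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}
 * expands to
 *	{ USB_DEVICE_INTERFACE_NUMBER(0x05c6, 0x9212, 3),
 *	  .driver_info = (unsigned long)&qmi_wwan_shared },
 * i.e. the match is on vendor/product id plus bInterfaceNumber == 3,
 * and matching interfaces use the "shared" (combined control+data)
 * configuration.
 */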
static const struct usb_device_id products[] = {
/* 1. CDC ECM like devices match on the control interface */
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Novatel USB551L and MC551 */
USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0xb001,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Novatel E362 */
USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9010,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Huawei E392, E398 and possibly others in "Windows mode" */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_shared,
},
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_shared,
},
{ /* Pantech UML290 - newer firmware */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
.driver_info = (unsigned long)&qmi_wwan_shared,
},
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0021, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0025, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0031, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0042, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0049, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0052, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */
{QMI_FIXED_INTF(0x19d2, 0x0058, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */
{QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */
{QMI_FIXED_INTF(0x19d2, 0x0113, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0118, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0121, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0123, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0124, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0125, 6)},
{QMI_FIXED_INTF(0x19d2, 0x0126, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0130, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0133, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0141, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */
{QMI_FIXED_INTF(0x19d2, 0x0158, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */
{QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
{QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */
{QMI_FIXED_INTF(0x19d2, 0x1021, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1245, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1247, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1252, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1254, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
/* 5. Gobi 2000 and 3000 devices */
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
{QMI_GOBI_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
{QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
{QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
{QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
{QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
{QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
{QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
{QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? */
{QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
{QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
{QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
{QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */
{ } /* END */
};
MODULE_DEVICE_TABLE(usb, products);
static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
{
struct usb_device_id *id = (struct usb_device_id *)prod;
/* Workaround to enable dynamic IDs. This disables usbnet's
* blacklisting functionality, which, if required, can be
* reimplemented here by using a magic "blacklist" value
* instead of 0 in the static device id table
*/
if (!id->driver_info) {
dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
id->driver_info = (unsigned long)&qmi_wwan_shared;
}
return usbnet_probe(intf, id);
}
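/* A hedged usage sketch (not part of this driver): thanks to the probe
 * workaround above, a device id missing from the static table can be
 * bound at runtime through the driver's new_id sysfs attribute. The
 * vendor/product strings are taken from the user (e.g. lsusb output,
 * hex without a 0x prefix); no particular device is implied here.
 */
#if 0	/* illustrative example, never compiled */
#include <stdio.h>

static int qmi_wwan_add_dynamic_id(const char *vid_hex, const char *pid_hex)
{
	FILE *f = fopen("/sys/bus/usb/drivers/qmi_wwan/new_id", "w");

	if (!f)
		return -1;
	/* the USB core parses "<vendor> <product>" and adds the id */
	fprintf(f, "%s %s\n", vid_hex, pid_hex);
	fclose(f);
	return 0;
}
#endif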
static struct usb_driver qmi_wwan_driver = {
.name = "qmi_wwan",
.id_table = products,
.probe = qmi_wwan_probe,
.disconnect = usbnet_disconnect,
.suspend = qmi_wwan_suspend,
.resume = qmi_wwan_resume,
.reset_resume = qmi_wwan_resume,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
module_usb_driver(qmi_wwan_driver);
MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
dh-electronics/linux-am35x | arch/mips/cavium-octeon/setup.c | 12 | 21964 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2007 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h> /* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/system.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-mio-defs.h>
#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
extern int __cvmx_interrupt_ecc_report_single_bit_errors;
extern void cvmx_interrupt_rsl_enable(void);
#endif
extern struct plat_smp_ops octeon_smp_ops;
#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif
static unsigned long long MAX_MEMORY = 512ull << 20;
struct octeon_boot_descriptor *octeon_boot_desc_ptr;
struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);
#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif
static int octeon_uart;
extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);
/**
* Return non-zero if we are currently running in the Octeon simulator
*
* Returns non-zero when running on the simulator, zero otherwise
*/
int octeon_is_simulation(void)
{
return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);
/**
* Return true if Octeon is in PCI Host mode. This means
* Linux can control the PCI bus.
*
* Returns non-zero if Octeon is in host mode.
*/
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
return 0;
#endif
}
/**
* Get the clock rate of Octeon
*
* Returns Clock rate in HZ
*/
uint64_t octeon_get_clock_rate(void)
{
struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);
static u64 octeon_io_clock_rate;
u64 octeon_get_io_clock_rate(void)
{
return octeon_io_clock_rate;
}
EXPORT_SYMBOL(octeon_get_io_clock_rate);
/**
* Write to the LCD display connected to the bootbus. This display
* exists on most Cavium evaluation boards. If it doesn't exist, then
* this function doesn't do anything.
*
* @s: String to write
*/
void octeon_write_lcd(const char *s)
{
if (octeon_bootinfo->led_display_base_addr) {
void __iomem *lcd_address =
ioremap_nocache(octeon_bootinfo->led_display_base_addr,
8);
int i;
for (i = 0; i < 8; i++, s++) {
if (*s)
iowrite8(*s, lcd_address + i);
else
iowrite8(' ', lcd_address + i);
}
iounmap(lcd_address);
}
}
/**
* Return the console uart passed by the bootloader
*
* Returns uart (0 or 1)
*/
int octeon_get_boot_uart(void)
{
int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
uart = 1;
#else
uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
1 : 0;
#endif
return uart;
}
/**
* Get the coremask Linux was booted on.
*
* Returns Core mask
*/
int octeon_get_boot_coremask(void)
{
return octeon_boot_desc_ptr->core_mask;
}
/**
* Check the hardware BIST results for a CPU
*/
void octeon_check_cpu_bist(void)
{
const int coreid = cvmx_get_core_num();
unsigned long long mask;
unsigned long long bist_val;
/* Check BIST results for COP0 registers */
mask = 0x1f00000000ull;
bist_val = read_octeon_c0_icacheerr();
if (bist_val & mask)
pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
coreid, bist_val);
bist_val = read_octeon_c0_dcacheerr();
if (bist_val & 1)
pr_err("Core%d L1 Dcache parity error: "
"CacheErr(dcache) = 0x%llx\n",
coreid, bist_val);
mask = 0xfc00000000000000ull;
bist_val = read_c0_cvmmemctl();
if (bist_val & mask)
pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
coreid, bist_val);
write_octeon_c0_dcacheerr(0);
}
/**
* Reboot Octeon
*
* @command: Command to pass to the bootloader. Currently ignored.
*/
static void octeon_restart(char *command)
{
/* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
int cpu;
for_each_online_cpu(cpu)
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
mb();
while (1)
cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}
/**
* Permanently stop a core.
*
* @arg: Ignored.
*/
static void octeon_kill_core(void *arg)
{
mb();
if (octeon_is_simulation()) {
/* The simulator needs the watchdog to stop for dead cores */
cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
/* A break instruction causes the simulator to stop a core */
asm volatile ("sync\nbreak");
}
}
/**
* Halt the system
*/
static void octeon_halt(void)
{
smp_call_function(octeon_kill_core, NULL, 0);
switch (octeon_bootinfo->board_type) {
case CVMX_BOARD_TYPE_NAO38:
/* Driving a 1 to GPIO 12 shuts off this board */
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
break;
default:
octeon_write_lcd("PowerOff");
break;
}
octeon_kill_core(NULL);
}
/**
* Handle all the error condition interrupts that might occur.
*
*/
#ifdef CONFIG_CAVIUM_DECODE_RSL
static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
{
cvmx_interrupt_rsl_decode();
return IRQ_HANDLED;
}
#endif
static char __read_mostly octeon_system_type[80];
static int __init init_octeon_system_type(void)
{
snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
cvmx_board_type_to_string(octeon_bootinfo->board_type),
octeon_model_get_string(read_c0_prid()));
return 0;
}
early_initcall(init_octeon_system_type);
/**
* Return a string representing the system type
*
* Returns the system type string
*/
const char *octeon_board_type_string(void)
{
return octeon_system_type;
}
const char *get_system_type(void)
__attribute__ ((alias("octeon_board_type_string")));
void octeon_user_io_init(void)
{
union octeon_cvmemctl cvmmemctl;
union cvmx_iob_fau_timeout fau_timeout;
union cvmx_pow_nw_tim nm_tim;
/* Get the current settings for CP0_CVMMEMCTL_REG */
cvmmemctl.u64 = read_c0_cvmmemctl();
/* R/W If set, marked write-buffer entries time out the same
* as other entries; if clear, marked write-buffer entries
* use the maximum timeout. */
cvmmemctl.s.dismarkwblongto = 1;
/* R/W If set, a merged store does not clear the write-buffer
* entry timeout state. */
cvmmemctl.s.dismrgclrwbto = 0;
/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
* word location for an IOBDMA. The other 8 bits come from the
* SCRADDR field of the IOBDMA. */
cvmmemctl.s.iobdmascrmsb = 0;
/* R/W If set, SYNCWS and SYNCS only order marked stores; if
* clear, SYNCWS and SYNCS only order unmarked
* stores. SYNCWSMARKED has no effect when DISSYNCWS is
* set. */
cvmmemctl.s.syncwsmarked = 0;
/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
cvmmemctl.s.dissyncws = 0;
/* R/W If set, no stall happens on write buffer full. */
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
cvmmemctl.s.diswbfst = 1;
else
cvmmemctl.s.diswbfst = 0;
/* R/W If set (and SX set), supervisor-level loads/stores can
* use XKPHYS addresses with <48>==0 */
cvmmemctl.s.xkmemenas = 0;
/* R/W If set (and UX set), user-level loads/stores can use
* XKPHYS addresses with VA<48>==0 */
cvmmemctl.s.xkmemenau = 0;
/* R/W If set (and SX set), supervisor-level loads/stores can
* use XKPHYS addresses with VA<48>==1 */
cvmmemctl.s.xkioenas = 0;
/* R/W If set (and UX set), user-level loads/stores can use
* XKPHYS addresses with VA<48>==1 */
cvmmemctl.s.xkioenau = 0;
/* R/W If set, all stores act as SYNCW (NOMERGE must be set
* when this is set) RW, reset to 0. */
cvmmemctl.s.allsyncw = 0;
/* R/W If set, no stores merge, and all stores reach the
* coherent bus in order. */
cvmmemctl.s.nomerge = 0;
/* R/W Selects the bit in the counter used for DID time-outs:
* 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
* between 1x and 2x this interval. For example, with
* DIDTTO=3, expiration interval is between 16K and 32K. */
cvmmemctl.s.didtto = 0;
/* R/W If set, the (mem) CSR clock never turns off. */
cvmmemctl.s.csrckalwys = 0;
/* R/W If set, mclk never turns off. */
cvmmemctl.s.mclkalwys = 0;
/* R/W Selects the bit in the counter used for write buffer
* flush time-outs (WBFLT+11) is the bit position in an
* internal counter used to determine expiration. The write
* buffer expires between 1x and 2x this interval. For
* example, with WBFLT = 0, a write buffer expires between 2K
* and 4K cycles after the write buffer entry is allocated. */
cvmmemctl.s.wbfltime = 0;
/* R/W If set, do not put Istream in the L2 cache. */
cvmmemctl.s.istrnol2 = 0;
/*
* R/W The write buffer threshold. As per erratum Core-14752
* for CN63XX, a sc/scd might fail if the write buffer is
* full. Lowering WBTHRESH greatly lowers the chances of the
* write buffer ever being full and triggering the erratum.
*/
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
cvmmemctl.s.wbthresh = 4;
else
cvmmemctl.s.wbthresh = 10;
/* R/W If set, CVMSEG is available for loads/stores in
* kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
cvmmemctl.s.cvmsegenak = 1;
#else
cvmmemctl.s.cvmsegenak = 0;
#endif
/* R/W If set, CVMSEG is available for loads/stores in
* supervisor mode. */
cvmmemctl.s.cvmsegenas = 0;
/* R/W If set, CVMSEG is available for loads/stores in user
* mode. */
cvmmemctl.s.cvmsegenau = 0;
/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
* is max legal value. */
cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
write_c0_cvmmemctl(cvmmemctl.u64);
if (smp_processor_id() == 0)
pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
/* Set a default for the hardware timeouts */
fau_timeout.u64 = 0;
fau_timeout.s.tout_val = 0xfff;
/* Disable tagwait FAU timeout */
fau_timeout.s.tout_enb = 0;
cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
nm_tim.u64 = 0;
/* 4096 cycles */
nm_tim.s.nw_tim = 3;
cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
write_octeon_c0_icacheerr(0);
write_c0_derraddr1(0);
}
/**
* Early entry point for arch setup
*/
void __init prom_init(void)
{
struct cvmx_sysinfo *sysinfo;
int i;
int argc;
#ifdef CONFIG_CAVIUM_RESERVE32
int64_t addr = -1;
#endif
/*
* The bootloader passes a pointer to the boot descriptor in
* $a3, this is available as fw_arg3.
*/
octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
octeon_bootinfo =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
sysinfo = cvmx_sysinfo_get();
memset(sysinfo, 0, sizeof(*sysinfo));
sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
sysinfo->phy_mem_desc_ptr =
cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
sysinfo->core_mask = octeon_bootinfo->core_mask;
sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
sysinfo->board_type = octeon_bootinfo->board_type;
sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
sizeof(sysinfo->mac_addr_base));
sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
memcpy(sysinfo->board_serial_number,
octeon_bootinfo->board_serial_number,
sizeof(sysinfo->board_serial_number));
sysinfo->compact_flash_common_base_addr =
octeon_bootinfo->compact_flash_common_base_addr;
sysinfo->compact_flash_attribute_base_addr =
octeon_bootinfo->compact_flash_attribute_base_addr;
sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* I/O clock runs at a different rate than the CPU. */
union cvmx_mio_rst_boot rst_boot;
rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
} else {
octeon_io_clock_rate = sysinfo->cpu_clock_hz;
}
/*
* Only enable the LED controller if we're running on a CN38XX, CN58XX,
* or CN56XX. The CN30XX and CN31XX don't have an LED controller.
*/
if (!octeon_is_simulation() &&
octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
cvmx_write_csr(CVMX_LED_EN, 0);
cvmx_write_csr(CVMX_LED_PRT, 0);
cvmx_write_csr(CVMX_LED_DBG, 0);
cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
cvmx_write_csr(CVMX_LED_EN, 1);
}
#ifdef CONFIG_CAVIUM_RESERVE32
/*
* We need to temporarily allocate all memory in the reserve32
* region. This makes sure the kernel doesn't allocate this
* memory when it is getting memory from the
* bootloader. Later, after the memory allocations are
* complete, the reserve32 will be freed.
*
* Allocate memory for RESERVED32 aligned on 2MB boundary. This
* is in case we later use hugetlb entries with it.
*/
addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
0, 0, 2 << 20,
"CAVIUM_RESERVE32", 0);
if (addr < 0)
pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
else
octeon_reserve32_memory = addr;
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
pr_info("Skipping L2 locking due to reduced L2 cache size\n");
} else {
uint32_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
/* TLB refill */
cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
/* General exception */
cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
/* Interrupt handler */
cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
}
#endif
octeon_check_cpu_bist();
octeon_uart = octeon_get_boot_uart();
#ifdef CONFIG_SMP
octeon_write_lcd("LinuxSMP");
#else
octeon_write_lcd("Linux");
#endif
#ifdef CONFIG_CAVIUM_GDB
/*
* When debugging the linux kernel, force the cores to enter
* the debug exception handler to break in.
*/
if (octeon_get_boot_debug_flag()) {
cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
cvmx_read_csr(CVMX_CIU_DINT);
}
#endif
/*
* BIST should always be enabled when doing a soft reset. L2
* Cache locking for instance is not cleared unless BIST is
* enabled. Unfortunately, due to chip erratum G-200 for
* CN38XX and CN31XX, BIST must be disabled on these parts.
*/
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
OCTEON_IS_MODEL(OCTEON_CN31XX))
cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
else
cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
/* Default to 64MB in the simulator to speed things up */
if (octeon_is_simulation())
MAX_MEMORY = 64ull << 20;
arcs_cmdline[0] = 0;
argc = octeon_boot_desc_ptr->argc;
for (i = 0; i < argc; i++) {
const char *arg =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
if ((strncmp(arg, "MEM=", 4) == 0) ||
(strncmp(arg, "mem=", 4) == 0)) {
sscanf(arg + 4, "%llu", &MAX_MEMORY);
MAX_MEMORY <<= 20;
if (MAX_MEMORY == 0)
MAX_MEMORY = 32ull << 30;
} else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
__cvmx_interrupt_ecc_report_single_bit_errors = 1;
pr_notice("Reporting of single bit ECC errors is "
"turned on\n");
#endif
} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
sizeof(arcs_cmdline) - 1) {
strcat(arcs_cmdline, " ");
strcat(arcs_cmdline, arg);
}
}
if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
strcat(arcs_cmdline, " console=ttyS0,115200");
#else
if (octeon_uart == 1)
strcat(arcs_cmdline, " console=ttyS1,115200");
else
strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
}
if (octeon_is_simulation()) {
/*
* The simulator uses an mtdram device pre-filled with
* the filesystem. Also specify the calibration delay
* to avoid calculating it every time.
*/
strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824");
}
mips_hpt_frequency = octeon_get_clock_rate();
octeon_init_cvmcount();
octeon_setup_delays();
_machine_restart = octeon_restart;
_machine_halt = octeon_halt;
octeon_user_io_init();
register_smp_ops(&octeon_smp_ops);
}
/* Exclude a single page from the regions obtained in plat_mem_setup. */
static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
{
if (addr > *mem && addr < *mem + *size) {
u64 inc = addr - *mem;
add_memory_region(*mem, inc, BOOT_MEM_RAM);
*mem += inc;
*size -= inc;
}
if (addr == *mem && *size > PAGE_SIZE) {
*mem += PAGE_SIZE;
*size -= PAGE_SIZE;
}
}
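/* Worked example (illustrative numbers, assuming 4 KiB pages): with
 * *mem = 0x10000000, *size = 0x200000 and addr = 0x10100000, the first
 * branch registers [0x10000000, 0x10100000) as BOOT_MEM_RAM and advances
 * *mem to addr; the second branch then skips one page, leaving
 * *mem = 0x10101000 and *size = 0xff000, so the page at addr is never
 * handed to add_memory_region().
 */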
void __init plat_mem_setup(void)
{
uint64_t mem_alloc_size;
uint64_t total;
int64_t memory;
total = 0;
/* First add the init memory we will be returning. */
memory = __pa_symbol(&__init_begin) & PAGE_MASK;
mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
if (mem_alloc_size > 0) {
add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
total += mem_alloc_size;
}
/*
* The MIPS memory init uses the first memory location for
* some memory vectors. When SPARSEMEM is in use, it doesn't
* verify that the size is big enough for the final
* vectors. Making the smallest chunk 4MB seems to be enough
* to work consistently.
*/
mem_alloc_size = 4 << 20;
if (mem_alloc_size > MAX_MEMORY)
mem_alloc_size = MAX_MEMORY;
/*
* When allocating memory, we want incrementing addresses from
* bootmem_alloc so the code in add_memory_region can merge
* regions next to each other.
*/
cvmx_bootmem_lock();
while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
&& (total < MAX_MEMORY)) {
#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
__pa_symbol(&__init_end), -1,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
#elif defined(CONFIG_HIGHMEM)
memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
#else
memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
#endif
if (memory >= 0) {
u64 size = mem_alloc_size;
/*
* exclude a page at the beginning and end of
* the 256MB PCIe 'hole' so the kernel will not
* try to allocate multi-page buffers that
* span the discontinuity.
*/
memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
&memory, &size);
memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
CVMX_PCIE_BAR1_PHYS_SIZE,
&memory, &size);
/*
* This function automatically merges address
* regions next to each other if they are
* received in incrementing order.
*/
if (size)
add_memory_region(memory, size, BOOT_MEM_RAM);
total += mem_alloc_size;
} else {
break;
}
}
cvmx_bootmem_unlock();
#ifdef CONFIG_CAVIUM_RESERVE32
/*
* Now that we've allocated the kernel memory it is safe to
* free the reserved region. We free it here so that builtin
* drivers can use the memory.
*/
if (octeon_reserve32_memory)
cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */
if (total == 0)
panic("Unable to allocate memory from "
"cvmx_bootmem_phy_alloc\n");
}
/*
* Emit one character to the boot UART. Exported for use by the
* watchdog timer.
*/
int prom_putchar(char c)
{
uint64_t lsrval;
/* Spin until there is room */
do {
lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
} while ((lsrval & 0x20) == 0);
/* Write the byte */
cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
return 1;
}
EXPORT_SYMBOL(prom_putchar);
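/* A minimal sketch (an assumption, not part of this file) of how early
 * boot code could print a whole string with prom_putchar(), expanding
 * '\n' to "\r\n" as serial consoles usually expect.
 */
#if 0	/* illustrative example, never compiled */
static void prom_putstr(const char *s)
{
	while (*s) {
		if (*s == '\n')
			prom_putchar('\r');
		prom_putchar(*s++);
	}
}
#endif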
void prom_free_prom_memory(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
/* Check for presence of Core-14449 fix. */
u32 insn;
u32 *foo;
foo = &insn;
asm volatile("# before" : : : "memory");
prefetch(foo);
asm volatile(
".set push\n\t"
".set noreorder\n\t"
"bal 1f\n\t"
"nop\n"
"1:\tlw %0,-12($31)\n\t"
".set pop\n\t"
: "=r" (insn) : : "$31", "memory");
if ((insn >> 26) != 0x33)
panic("No PREF instruction at Core-14449 probe point.\n");
if (((insn >> 16) & 0x1f) != 28)
panic("Core-14449 WAR not in place (%04x).\n"
"Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).\n", insn);
}
#ifdef CONFIG_CAVIUM_DECODE_RSL
cvmx_interrupt_rsl_enable();
/* Add an interrupt handler for general failures. */
if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
"RML/RSL", octeon_rlm_interrupt)) {
panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
}
#endif
}
| gpl-2.0 |
zarboz/Beastmode.Evita.2.0 | drivers/video/msm/vidc/common/vcd/vcd_client_sm.c | 12 | 42199 | /* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <media/msm/vidc_type.h>
#include "vcd.h"
static const struct vcd_clnt_state_table *vcd_clnt_state_table[];
void vcd_clnt_handle_device_err_fatal(struct vcd_clnt_ctxt *cctxt,
u32 event)
{
if (cctxt->clnt_state.state == VCD_CLIENT_STATE_NULL) {
cctxt->callback(VCD_EVT_RESP_OPEN, VCD_ERR_HW_FATAL, NULL, 0,
cctxt, cctxt->client_data);
vcd_destroy_client_context(cctxt);
return;
}
if (event == VCD_EVT_RESP_BASE)
event = VCD_EVT_IND_HWERRFATAL;
if (cctxt->clnt_state.state != VCD_CLIENT_STATE_INVALID) {
cctxt->callback(event, VCD_ERR_HW_FATAL, NULL, 0,
cctxt, cctxt->client_data);
vcd_flush_buffers_in_err_fatal(cctxt);
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_INVALID,
CLIENT_STATE_EVENT_NUMBER(clnt_cb));
}
}
static u32 vcd_close_in_open(struct vcd_clnt_ctxt *cctxt)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_close_in_open:");
if (cctxt->in_buf_pool.allocated ||
cctxt->out_buf_pool.allocated) {
VCD_MSG_ERROR("\n Allocated buffers are not freed yet");
return VCD_ERR_ILLEGAL_OP;
}
vcd_destroy_client_context(cctxt);
return rc;
}
static u32 vcd_close_in_invalid(struct vcd_clnt_ctxt *cctxt)
{
VCD_MSG_LOW("vcd_close_in_invalid:");
if (cctxt->in_buf_pool.allocated ||
cctxt->out_buf_pool.allocated){
VCD_MSG_ERROR("Allocated buffers are not freed yet");
return VCD_ERR_ILLEGAL_OP;
}
if (cctxt->status.mask & VCD_CLEANING_UP)
cctxt->status.mask |= VCD_CLOSE_PENDING;
else
vcd_destroy_client_context(cctxt);
return VCD_S_SUCCESS;
}
static u32 vcd_start_in_run_cmn(struct vcd_clnt_ctxt *cctxt)
{
VCD_MSG_LOW("vcd_start_in_run_cmn:");
cctxt->callback(VCD_EVT_RESP_START, VCD_S_SUCCESS, NULL, 0,
cctxt, cctxt->client_data);
return VCD_S_SUCCESS;
}
static u32 vcd_encode_start_in_open(struct vcd_clnt_ctxt *cctxt)
{
u32 rc = VCD_S_SUCCESS;
struct vcd_property_hdr prop_hdr;
struct vcd_property_vop_timing timing;
VCD_MSG_LOW("vcd_encode_start_in_open:");
if (cctxt->decoding) {
VCD_MSG_ERROR("vcd_encode_init for decoder client");
return VCD_ERR_ILLEGAL_OP;
}
if ((!cctxt->meta_mode && !cctxt->in_buf_pool.entries) ||
!cctxt->out_buf_pool.entries ||
(!cctxt->meta_mode &&
cctxt->in_buf_pool.validated != cctxt->in_buf_pool.count) ||
cctxt->out_buf_pool.validated !=
cctxt->out_buf_pool.count) {
VCD_MSG_HIGH("%s: Buffer pool is not completely setup yet",
__func__);
}
rc = vcd_sched_add_client(cctxt);
VCD_FAILED_RETURN(rc, "Failed: vcd_sched_add_client");
prop_hdr.prop_id = VCD_I_VOP_TIMING;
prop_hdr.sz = sizeof(struct vcd_property_vop_timing);
rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &timing);
VCD_FAILED_RETURN(rc, "Failed: Get VCD_I_VOP_TIMING");
if (!timing.vop_time_resolution) {
VCD_MSG_ERROR("Vop_time_resolution value is zero");
return VCD_ERR_FAIL;
}
cctxt->time_resoln = timing.vop_time_resolution;
rc = vcd_process_cmd_sess_start(cctxt);
if (!VCD_FAILED(rc)) {
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_STARTING,
CLIENT_STATE_EVENT_NUMBER
(encode_start));
}
return rc;
}
static u32 vcd_encode_start_in_run(struct vcd_clnt_ctxt
*cctxt)
{
VCD_MSG_LOW("vcd_encode_start_in_run:");
(void) vcd_start_in_run_cmn(cctxt);
return VCD_S_SUCCESS;
}
static u32 vcd_encode_frame_cmn(struct vcd_clnt_ctxt *cctxt,
struct vcd_frame_data *input_frame)
{
VCD_MSG_LOW("vcd_encode_frame_cmn in %d:", cctxt->clnt_state.state);
if (cctxt->decoding) {
VCD_MSG_ERROR("vcd_encode_frame for decoder client");
return VCD_ERR_ILLEGAL_OP;
}
return vcd_handle_input_frame(cctxt, input_frame);
}
static u32 vcd_decode_start_in_open
(struct vcd_clnt_ctxt *cctxt,
struct vcd_sequence_hdr *seq_hdr)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_decode_start_in_open:");
if (!cctxt->decoding) {
VCD_MSG_ERROR("vcd_decode_init for encoder client");
return VCD_ERR_ILLEGAL_OP;
}
if (seq_hdr) {
VCD_MSG_HIGH("Seq hdr supplied. len = %d",
seq_hdr->sequence_header_len);
rc = vcd_store_seq_hdr(cctxt, seq_hdr);
} else {
VCD_MSG_HIGH("Seq hdr not supplied");
cctxt->seq_hdr.sequence_header_len = 0;
cctxt->seq_hdr.sequence_header = NULL;
}
VCD_FAILED_RETURN(rc, "Err processing seq hdr");
rc = vcd_process_cmd_sess_start(cctxt);
if (!VCD_FAILED(rc)) {
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_STARTING,
CLIENT_STATE_EVENT_NUMBER
(decode_start));
}
return rc;
}
static u32 vcd_decode_start_in_run(struct vcd_clnt_ctxt *cctxt,
struct vcd_sequence_hdr *seqhdr)
{
VCD_MSG_LOW("vcd_decode_start_in_run:");
(void) vcd_start_in_run_cmn(cctxt);
return VCD_S_SUCCESS;
}
static u32 vcd_decode_frame_cmn
(struct vcd_clnt_ctxt *cctxt,
struct vcd_frame_data *input_frame)
{
VCD_MSG_LOW("vcd_decode_frame_cmn in %d:", cctxt->clnt_state.state);
if (!cctxt->decoding) {
VCD_MSG_ERROR("Decode_frame api called for Encoder client");
return VCD_ERR_ILLEGAL_OP;
}
return vcd_handle_input_frame(cctxt, input_frame);
}
static u32 vcd_pause_in_run(struct vcd_clnt_ctxt *cctxt)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_pause_in_run:");
if (cctxt->sched_clnt_hdl) {
rc = vcd_sched_suspend_resume_clnt(cctxt, false);
VCD_FAILED_RETURN(rc, "Failed: vcd_sched_suspend_resume_clnt");
}
if (cctxt->status.frame_submitted > 0) {
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_PAUSING,
CLIENT_STATE_EVENT_NUMBER
(pause));
} else {
VCD_MSG_HIGH("No client frames are currently being processed");
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_PAUSED,
CLIENT_STATE_EVENT_NUMBER
(pause));
cctxt->callback(VCD_EVT_RESP_PAUSE,
VCD_S_SUCCESS,
NULL, 0, cctxt, cctxt->client_data);
rc = vcd_power_event(cctxt->dev_ctxt, cctxt,
VCD_EVT_PWR_CLNT_PAUSE);
if (VCD_FAILED(rc))
VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_PAUSE_END failed");
}
return VCD_S_SUCCESS;
}
static u32 vcd_resume_in_paused(struct vcd_clnt_ctxt *cctxt)
{
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_resume_in_paused:");
if (cctxt->sched_clnt_hdl) {
rc = vcd_power_event(cctxt->dev_ctxt,
cctxt, VCD_EVT_PWR_CLNT_RESUME);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_RESUME failed");
} else {
rc = vcd_sched_suspend_resume_clnt(cctxt, true);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR
("rc = 0x%x. Failed: "
"vcd_sched_suspend_resume_clnt",
rc);
}
}
if (!VCD_FAILED(rc)) {
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_RUN,
CLIENT_STATE_EVENT_NUMBER
(resume));
vcd_try_submit_frame(dev_ctxt);
}
} else {
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_RUN,
CLIENT_STATE_EVENT_NUMBER
(resume));
}
return rc;
}
static u32 vcd_flush_cmn(struct vcd_clnt_ctxt *cctxt, u32 mode)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_flush_cmn in %d:", cctxt->clnt_state.state);
rc = vcd_flush_buffers(cctxt, mode);
VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers");
if (cctxt->status.frame_submitted > 0) {
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_FLUSHING,
CLIENT_STATE_EVENT_NUMBER
(flush));
} else {
VCD_MSG_HIGH("All buffers are flushed");
cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
}
return rc;
}
static u32 vcd_flush_inopen(struct vcd_clnt_ctxt *cctxt,
u32 mode)
{
VCD_MSG_LOW("vcd_flush_inopen:");
cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
return VCD_S_SUCCESS;
}
static u32 vcd_flush_in_flushing
(struct vcd_clnt_ctxt *cctxt, u32 mode)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_flush_in_flushing:");
rc = vcd_flush_buffers(cctxt, mode);
return rc;
}
static u32 vcd_flush_in_eos(struct vcd_clnt_ctxt *cctxt,
u32 mode)
{
VCD_MSG_LOW("vcd_flush_in_eos:");
if (mode > VCD_FLUSH_ALL || !mode) {
VCD_MSG_ERROR("Invalid flush mode %d", mode);
return VCD_ERR_ILLEGAL_PARM;
}
VCD_MSG_MED("Flush mode requested %d", mode);
cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
return VCD_S_SUCCESS;
}
static u32 vcd_flush_in_invalid(struct vcd_clnt_ctxt *cctxt,
u32 mode)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_flush_in_invalid:");
if (!(cctxt->status.mask & VCD_CLEANING_UP)) {
rc = vcd_flush_buffers(cctxt, mode);
if (!VCD_FAILED(rc)) {
VCD_MSG_HIGH("All buffers are flushed");
cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
}
} else
cctxt->status.mask |= (mode & VCD_FLUSH_ALL);
return rc;
}
static u32 vcd_stop_cmn(struct vcd_clnt_ctxt *cctxt)
{
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
struct vcd_transc *transc;
VCD_MSG_LOW("vcd_stop_cmn in %d:", cctxt->clnt_state.state);
rc = vcd_flush_buffers(cctxt, VCD_FLUSH_ALL);
VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers");
if (!cctxt->status.frame_submitted) {
if (vcd_get_command_channel(dev_ctxt, &transc)) {
rc = vcd_power_event(dev_ctxt, cctxt,
VCD_EVT_PWR_CLNT_CMD_BEGIN);
if (!VCD_FAILED(rc)) {
transc->type = VCD_CMD_CODEC_STOP;
transc->cctxt = cctxt;
rc = vcd_submit_cmd_sess_end(transc);
} else {
VCD_MSG_ERROR("Failed:"
" VCD_EVT_PWR_CLNT_CMD_BEGIN");
}
if (VCD_FAILED(rc)) {
vcd_release_command_channel(dev_ctxt,
transc);
}
} else {
vcd_client_cmd_flush_and_en_q(cctxt,
VCD_CMD_CODEC_STOP);
}
}
if (VCD_FAILED(rc)) {
(void)vcd_power_event(dev_ctxt, cctxt,
VCD_EVT_PWR_CLNT_CMD_FAIL);
} else {
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_STOPPING,
CLIENT_STATE_EVENT_NUMBER
(stop));
}
return rc;
}
static u32 vcd_stop_inopen(struct vcd_clnt_ctxt *cctxt)
{
VCD_MSG_LOW("vcd_stop_inopen:");
cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS,
NULL, 0, cctxt,
cctxt->client_data);
return VCD_S_SUCCESS;
}
static u32 vcd_stop_in_run(struct vcd_clnt_ctxt *cctxt)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_stop_in_run:");
rc = vcd_stop_cmn(cctxt);
if (!VCD_FAILED(rc) &&
(cctxt->status.mask & VCD_FIRST_IP_RCVD)) {
rc = vcd_power_event(cctxt->dev_ctxt,
cctxt, VCD_EVT_PWR_CLNT_LAST_FRAME);
}
return rc;
}
static u32 vcd_stop_in_eos(struct vcd_clnt_ctxt *cctxt)
{
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_stop_in_eos:");
if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
rc = vcd_stop_cmn(cctxt);
if (!VCD_FAILED(rc)) {
rc = vcd_power_event(cctxt->dev_ctxt,
cctxt, VCD_EVT_PWR_CLNT_LAST_FRAME);
cctxt->status.mask &= ~VCD_EOS_WAIT_OP_BUF;
}
} else
cctxt->status.mask |= VCD_STOP_PENDING;
return rc;
}
static u32 vcd_stop_in_invalid(struct vcd_clnt_ctxt *cctxt)
{
VCD_MSG_LOW("vcd_stop_in_invalid:");
if (cctxt->status.mask & VCD_CLEANING_UP) {
cctxt->status.mask |= VCD_STOP_PENDING;
} else {
(void) vcd_flush_buffers(cctxt, VCD_FLUSH_ALL);
cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL,
0, cctxt, cctxt->client_data);
}
return VCD_S_SUCCESS;
}
static u32 vcd_set_property_cmn
(struct vcd_clnt_ctxt *cctxt,
struct vcd_property_hdr *prop_hdr, void *prop_val)
{
u32 rc;
VCD_MSG_LOW("vcd_set_property_cmn in %d:", cctxt->clnt_state.state);
VCD_MSG_LOW("property Id = %d", prop_hdr->prop_id);
if (!prop_hdr->sz || !prop_hdr->prop_id) {
VCD_MSG_MED("Bad parameters");
return VCD_ERR_ILLEGAL_PARM;
}
rc = ddl_set_property(cctxt->ddl_handle, prop_hdr, prop_val);
VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
switch (prop_hdr->prop_id) {
case VCD_I_META_BUFFER_MODE:
{
struct vcd_property_live *live =
(struct vcd_property_live *)prop_val;
cctxt->meta_mode = live->live;
break;
}
case VCD_I_LIVE:
{
struct vcd_property_live *live =
(struct vcd_property_live *)prop_val;
cctxt->live = live->live;
break;
}
case VCD_I_FRAME_RATE:
{
if (cctxt->sched_clnt_hdl) {
rc = vcd_set_frame_rate(cctxt,
(struct vcd_property_frame_rate *)
prop_val);
}
break;
}
case VCD_I_FRAME_SIZE:
{
if (cctxt->sched_clnt_hdl) {
rc = vcd_set_frame_size(cctxt,
(struct vcd_property_frame_size *)
prop_val);
}
break;
}
case VCD_I_INTRA_PERIOD:
{
struct vcd_property_i_period *iperiod =
(struct vcd_property_i_period *)prop_val;
cctxt->bframe = iperiod->b_frames;
break;
}
case VCD_REQ_PERF_LEVEL:
rc = vcd_req_perf_level(cctxt,
(struct vcd_property_perf_level *)prop_val);
break;
default:
{
break;
}
}
return rc;
}
static u32 vcd_get_property_cmn
(struct vcd_clnt_ctxt *cctxt,
struct vcd_property_hdr *prop_hdr, void *prop_val)
{
VCD_MSG_LOW("vcd_get_property_cmn in %d:", cctxt->clnt_state.state);
VCD_MSG_LOW("property Id = %d", prop_hdr->prop_id);
if (!prop_hdr->sz || !prop_hdr->prop_id) {
VCD_MSG_MED("Bad parameters");
return VCD_ERR_ILLEGAL_PARM;
}
return ddl_get_property(cctxt->ddl_handle, prop_hdr, prop_val);
}
static u32 vcd_set_buffer_requirements_cmn
(struct vcd_clnt_ctxt *cctxt,
enum vcd_buffer_type buffer,
struct vcd_buffer_requirement *buffer_req)
{
struct vcd_property_hdr Prop_hdr;
u32 rc = VCD_S_SUCCESS;
struct vcd_buffer_pool *buf_pool;
u32 first_frm_recvd = 0;
VCD_MSG_LOW("vcd_set_buffer_requirements_cmn in %d:",
cctxt->clnt_state.state);
if (!cctxt->decoding &&
cctxt->clnt_state.state != VCD_CLIENT_STATE_OPEN) {
VCD_MSG_ERROR("Bad state (%d) for encoder",
cctxt->clnt_state.state);
return VCD_ERR_BAD_STATE;
}
VCD_MSG_MED("Buffer type = %d", buffer);
if (buffer == VCD_BUFFER_INPUT) {
Prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
buf_pool = &cctxt->in_buf_pool;
first_frm_recvd = VCD_FIRST_IP_RCVD;
} else if (buffer == VCD_BUFFER_OUTPUT) {
Prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
buf_pool = &cctxt->out_buf_pool;
first_frm_recvd = VCD_FIRST_OP_RCVD;
} else {
rc = VCD_ERR_ILLEGAL_PARM;
}
VCD_FAILED_RETURN(rc, "Invalid buffer type provided");
if (buf_pool->validated > 0) {
VCD_MSG_ERROR("Need to free allocated buffers");
return VCD_ERR_ILLEGAL_OP;
}
first_frm_recvd &= cctxt->status.mask;
if (first_frm_recvd) {
VCD_MSG_ERROR("VCD SetBufReq called when data path is active");
return VCD_ERR_BAD_STATE;
}
Prop_hdr.sz = sizeof(*buffer_req);
rc = ddl_set_property(cctxt->ddl_handle, &Prop_hdr, buffer_req);
VCD_FAILED_RETURN(rc, "Failed: ddl_set_property");
if (buf_pool->entries) {
VCD_MSG_MED("Resetting buffer requirements");
vcd_free_buffer_pool_entries(buf_pool);
}
return rc;
}
static u32 vcd_get_buffer_requirements_cmn
(struct vcd_clnt_ctxt *cctxt,
enum vcd_buffer_type buffer,
struct vcd_buffer_requirement *buffer_req)
{
struct vcd_property_hdr Prop_hdr;
u32 rc = VCD_S_SUCCESS;
VCD_MSG_LOW("vcd_get_buffer_requirements_cmn in %d:",
cctxt->clnt_state.state);
VCD_MSG_MED("Buffer type = %d", buffer);
if (buffer == VCD_BUFFER_INPUT)
Prop_hdr.prop_id = DDL_I_INPUT_BUF_REQ;
else if (buffer == VCD_BUFFER_OUTPUT)
Prop_hdr.prop_id = DDL_I_OUTPUT_BUF_REQ;
else
rc = VCD_ERR_ILLEGAL_PARM;
VCD_FAILED_RETURN(rc, "Invalid buffer type provided");
Prop_hdr.sz = sizeof(*buffer_req);
return ddl_get_property(cctxt->ddl_handle, &Prop_hdr, buffer_req);
}
static u32 vcd_set_buffer_cmn
(struct vcd_clnt_ctxt *cctxt,
enum vcd_buffer_type buffer_type, u8 *buffer, u32 buf_size)
{
u32 rc;
struct vcd_buffer_pool *buf_pool;
VCD_MSG_LOW("vcd_set_buffer_cmn in %d:", cctxt->clnt_state.state);
rc = vcd_common_allocate_set_buffer(cctxt, buffer_type, buf_size,
&buf_pool);
if (!VCD_FAILED(rc)) {
rc = vcd_set_buffer_internal(cctxt, buf_pool, buffer,
buf_size);
}
return rc;
}
static u32 vcd_allocate_buffer_cmn
(struct vcd_clnt_ctxt *cctxt,
enum vcd_buffer_type buffer,
u32 buf_size, u8 **vir_buf_addr, u8 **phy_buf_addr)
{
u32 rc;
struct vcd_buffer_pool *buf_pool;
VCD_MSG_LOW("vcd_allocate_buffer_cmn in %d:",
cctxt->clnt_state.state);
rc = vcd_common_allocate_set_buffer(cctxt, buffer, buf_size,
&buf_pool);
if (!VCD_FAILED(rc)) {
rc = vcd_allocate_buffer_internal(cctxt,
buf_pool,
buf_size,
vir_buf_addr,
phy_buf_addr);
}
return rc;
}
static u32 vcd_free_buffer_cmn
(struct vcd_clnt_ctxt *cctxt,
enum vcd_buffer_type buffer_type, u8 *buffer)
{
VCD_MSG_LOW("vcd_free_buffer_cmn in %d:", cctxt->clnt_state.state);
return vcd_free_one_buffer_internal(cctxt, buffer_type, buffer);
}
static u32 vcd_fill_output_buffer_cmn
(struct vcd_clnt_ctxt *cctxt,
struct vcd_frame_data *buffer)
{
u32 rc = VCD_S_SUCCESS;
struct vcd_buffer_entry *buf_entry;
u32 result = true;
u32 handled = true;
if (!cctxt || !buffer) {
VCD_MSG_ERROR("%s(): Inavlid params cctxt %p buffer %p",
__func__, cctxt, buffer);
return VCD_ERR_BAD_POINTER;
}
VCD_MSG_LOW("vcd_fill_output_buffer_cmn in %d:",
cctxt->clnt_state.state);
if (cctxt->status.mask & VCD_IN_RECONFIG) {
buffer->time_stamp = 0;
buffer->data_len = 0;
VCD_MSG_LOW("In reconfig: Return output buffer");
cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
VCD_S_SUCCESS,
buffer,
sizeof(struct vcd_frame_data),
cctxt, cctxt->client_data);
return rc;
}
buf_entry = vcd_check_fill_output_buffer(cctxt, buffer);
if (!buf_entry)
return VCD_ERR_BAD_POINTER;
if (!(cctxt->status.mask & VCD_FIRST_OP_RCVD)) {
rc = vcd_handle_first_fill_output_buffer(cctxt, buffer,
&handled);
VCD_FAILED_RETURN(rc,
"Failed: vcd_handle_first_fill_output_buffer");
if (handled)
return rc ;
}
result =
vcd_buffer_pool_entry_en_q(&cctxt->out_buf_pool, buf_entry);
if (!result && !cctxt->decoding) {
VCD_MSG_ERROR("Failed: vcd_buffer_pool_entry_en_q");
return VCD_ERR_FAIL;
}
buf_entry->frame = *buffer;
rc = vcd_return_op_buffer_to_hw(cctxt, buf_entry);
if (!VCD_FAILED(rc) && cctxt->sched_clnt_hdl) {
cctxt->sched_clnt_hdl->tkns++;
vcd_try_submit_frame(cctxt->dev_ctxt);
}
return rc;
}
static u32 vcd_fill_output_buffer_in_eos
(struct vcd_clnt_ctxt *cctxt,
struct vcd_frame_data *buffer)
{
u32 rc = VCD_S_SUCCESS;
struct vcd_buffer_entry *buf_entry;
VCD_MSG_LOW("vcd_fill_output_buffer_in_eos:");
buf_entry = vcd_check_fill_output_buffer(cctxt, buffer);
if (!buf_entry)
return VCD_ERR_BAD_POINTER;
if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
VCD_MSG_HIGH("Got an output buffer we were waiting for");
buf_entry->frame = *buffer;
buf_entry->frame.data_len = 0;
buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS;
buf_entry->frame.ip_frm_tag =
cctxt->status.eos_trig_ip_frm.ip_frm_tag;
buf_entry->frame.time_stamp =
cctxt->status.eos_trig_ip_frm.time_stamp;
cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE,
VCD_S_SUCCESS,
&buf_entry->frame,
sizeof(struct vcd_frame_data),
cctxt, cctxt->client_data);
cctxt->status.mask &= ~VCD_EOS_WAIT_OP_BUF;
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_RUN,
CLIENT_STATE_EVENT_NUMBER
(fill_output_buffer));
} else {
rc = vcd_fill_output_buffer_cmn(cctxt, buffer);
}
return rc;
}
static void vcd_clnt_cb_in_starting
(struct vcd_clnt_ctxt *cctxt,
u32 event, u32 status, void *payload, size_t sz,
u32 *ddl_handle, void *const client_data)
{
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
struct vcd_transc *transc =
(struct vcd_transc *)client_data;
VCD_MSG_LOW("vcd_clnt_cb_in_starting:");
if (cctxt->ddl_handle != ddl_handle) {
VCD_MSG_ERROR("vcd_clnt_cb_in_initing: Wrong DDL handle %p",
ddl_handle);
return;
}
switch (event) {
case VCD_EVT_RESP_START:
{
vcd_handle_start_done(cctxt,
(struct vcd_transc *)client_data,
status);
break;
}
case VCD_EVT_RESP_STOP:
{
vcd_handle_stop_done_in_starting(cctxt,
(struct vcd_transc *)client_data,
status);
break;
}
case VCD_EVT_IND_HWERRFATAL:
{
cctxt->status.cmd_submitted--;
vcd_mark_command_channel(cctxt->dev_ctxt, transc);
vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START,
status);
break;
}
default:
{
VCD_MSG_ERROR("Unexpected callback event=%d status=%d "
"from DDL", event, status);
dev_ctxt->command_continue = false;
break;
}
}
}
static void vcd_clnt_cb_in_run
(struct vcd_clnt_ctxt *cctxt,
u32 event,
u32 status,
void *payload, size_t sz, u32 *ddl_handle, void *const client_data)
{
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
if (cctxt->ddl_handle != ddl_handle) {
VCD_MSG_ERROR("ddl_handle mismatch");
return;
}
switch (event) {
case VCD_EVT_RESP_INPUT_DONE:
{
rc = vcd_handle_input_done(cctxt, payload, event,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_DONE:
{
rc = vcd_handle_frame_done(cctxt, payload, event,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_REQ:
{
rc = vcd_handle_output_required(cctxt, payload,
status);
break;
}
case VCD_EVT_IND_OUTPUT_RECONFIG:
{
rc = vcd_handle_ind_output_reconfig(cctxt, payload,
status);
break;
}
case VCD_EVT_RESP_TRANSACTION_PENDING:
{
vcd_handle_trans_pending(cctxt);
break;
}
case VCD_EVT_IND_HWERRFATAL:
{
vcd_handle_ind_hw_err_fatal(cctxt,
VCD_EVT_IND_HWERRFATAL, status);
break;
}
case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
{
vcd_handle_ind_info_output_reconfig(cctxt, status);
break;
}
default:
{
VCD_MSG_ERROR
("Unexpected callback event=%d status=%d from DDL",
event, status);
dev_ctxt->command_continue = false;
break;
}
}
if (!VCD_FAILED(rc) &&
(event == VCD_EVT_RESP_INPUT_DONE ||
event == VCD_EVT_RESP_OUTPUT_DONE ||
event == VCD_EVT_RESP_OUTPUT_REQ)) {
if (((struct ddl_frame_data_tag *)
payload)->frm_trans_end)
vcd_mark_frame_channel(cctxt->dev_ctxt);
}
}
static void vcd_clnt_cb_in_eos
(struct vcd_clnt_ctxt *cctxt,
u32 event,
u32 status,
void *payload, size_t sz, u32 *ddl_handle, void *const client_data) {
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
struct vcd_transc *transc = NULL;
u32 frm_trans_end = false, rc = VCD_S_SUCCESS;
if (cctxt->ddl_handle != ddl_handle) {
VCD_MSG_ERROR("ddl_handle mismatch");
return;
}
switch (event) {
case VCD_EVT_RESP_INPUT_DONE:
{
rc = vcd_handle_input_done_in_eos(cctxt, payload,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_DONE:
{
rc = vcd_handle_frame_done_in_eos(cctxt, payload,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_REQ:
{
rc = vcd_handle_output_required(cctxt, payload,
status);
break;
}
case VCD_EVT_RESP_EOS_DONE:
{
transc = (struct vcd_transc *)client_data;
vcd_handle_eos_done(cctxt, transc, status);
vcd_mark_frame_channel(cctxt->dev_ctxt);
break;
}
case VCD_EVT_IND_OUTPUT_RECONFIG:
{
rc = vcd_handle_ind_output_reconfig(cctxt,
payload, status);
if (!VCD_FAILED(rc)) {
frm_trans_end = true;
payload = NULL;
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_RUN,
CLIENT_STATE_EVENT_NUMBER
(clnt_cb));
VCD_MSG_LOW
("RECONFIGinEOS:Suspending Client");
rc = vcd_sched_suspend_resume_clnt(cctxt,
false);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR
("Failed: suspend_resume_clnt. rc=0x%x",
rc);
}
}
break;
}
case VCD_EVT_IND_HWERRFATAL:
{
vcd_handle_ind_hw_err_fatal(cctxt,
VCD_EVT_IND_HWERRFATAL, status);
break;
}
case VCD_EVT_IND_INFO_OUTPUT_RECONFIG:
{
vcd_handle_ind_info_output_reconfig(cctxt, status);
break;
}
default:
{
VCD_MSG_ERROR
("Unexpected callback event=%d status=%d from DDL",
event, status);
dev_ctxt->command_continue = false;
break;
}
}
if (!VCD_FAILED(rc) &&
(event == VCD_EVT_RESP_INPUT_DONE ||
event == VCD_EVT_RESP_OUTPUT_DONE ||
event == VCD_EVT_RESP_OUTPUT_REQ ||
event == VCD_EVT_IND_OUTPUT_RECONFIG)) {
if (payload && ((struct ddl_frame_data_tag *)
payload)->frm_trans_end) {
vcd_mark_frame_channel(cctxt->dev_ctxt);
frm_trans_end = true;
}
if (frm_trans_end && !cctxt->status.frame_submitted)
vcd_handle_eos_trans_end(cctxt);
}
}
static void vcd_clnt_cb_in_flushing
(struct vcd_clnt_ctxt *cctxt,
u32 event,
u32 status,
void *payload, size_t sz, u32 *ddl_handle, void *const client_data) {
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
u32 frm_trans_end = false;
VCD_MSG_LOW("vcd_clnt_cb_in_flushing:");
if (cctxt->ddl_handle != ddl_handle) {
VCD_MSG_ERROR("ddl_handle mismatch");
return;
}
switch (event) {
case VCD_EVT_RESP_INPUT_DONE:
{
rc = vcd_handle_input_done(cctxt,
payload,
VCD_EVT_RESP_INPUT_FLUSHED,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_DONE:
{
rc = vcd_handle_frame_done(cctxt,
payload,
VCD_EVT_RESP_OUTPUT_FLUSHED,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_REQ:
{
rc = vcd_handle_output_required_in_flushing(cctxt,
payload);
break;
}
case VCD_EVT_IND_OUTPUT_RECONFIG:
{
rc = vcd_handle_ind_output_reconfig(cctxt,
payload, status);
if (!VCD_FAILED(rc)) {
frm_trans_end = true;
payload = NULL;
}
break;
}
case VCD_EVT_IND_HWERRFATAL:
{
vcd_handle_ind_hw_err_fatal(cctxt,
VCD_EVT_IND_HWERRFATAL, status);
break;
}
default:
{
VCD_MSG_ERROR
("Unexpected callback event=%d status=%d from DDL",
event, status);
dev_ctxt->command_continue = false;
break;
}
}
if (!VCD_FAILED(rc) && ((event == VCD_EVT_RESP_INPUT_DONE ||
event == VCD_EVT_RESP_OUTPUT_DONE ||
event == VCD_EVT_RESP_OUTPUT_REQ ||
event == VCD_EVT_IND_OUTPUT_RECONFIG))) {
if (payload &&
((struct ddl_frame_data_tag *)\
payload)->frm_trans_end) {
vcd_mark_frame_channel(cctxt->dev_ctxt);
frm_trans_end = true;
}
if (frm_trans_end && !cctxt->status.frame_submitted) {
VCD_MSG_HIGH
("All pending frames recvd from DDL");
if (cctxt->status.mask & VCD_FLUSH_INPUT)
vcd_flush_bframe_buffers(cctxt,
VCD_FLUSH_INPUT);
if (cctxt->status.mask & VCD_FLUSH_OUTPUT)
vcd_flush_output_buffers(cctxt);
vcd_send_flush_done(cctxt, VCD_S_SUCCESS);
vcd_release_interim_frame_channels(dev_ctxt);
VCD_MSG_HIGH("Flush complete");
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_RUN,
CLIENT_STATE_EVENT_NUMBER
(clnt_cb));
}
}
}
static void vcd_clnt_cb_in_stopping
(struct vcd_clnt_ctxt *cctxt,
u32 event,
u32 status,
void *payload, size_t sz, u32 *ddl_handle, void *const client_data) {
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
u32 frm_trans_end = false;
VCD_MSG_LOW("vcd_clnt_cb_in_stopping:");
if (cctxt->ddl_handle != ddl_handle) {
VCD_MSG_ERROR("ddl_handle mismatch");
return;
}
switch (event) {
case VCD_EVT_RESP_INPUT_DONE:
{
rc = vcd_handle_input_done(cctxt,
payload,
VCD_EVT_RESP_INPUT_FLUSHED,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_DONE:
{
rc = vcd_handle_frame_done(cctxt,
payload,
VCD_EVT_RESP_OUTPUT_FLUSHED,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_REQ:
{
rc = vcd_handle_output_required_in_flushing(cctxt,
payload);
break;
}
case VCD_EVT_RESP_STOP:
{
vcd_handle_stop_done(cctxt,
(struct vcd_transc *)
client_data, status);
break;
}
case VCD_EVT_IND_OUTPUT_RECONFIG:
{
(void) vcd_handle_ind_output_reconfig(cctxt,
payload, status);
frm_trans_end = true;
payload = NULL;
break;
}
case VCD_EVT_IND_HWERRFATAL:
{
vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_RESP_STOP,
status);
break;
}
default:
{
VCD_MSG_ERROR
("Unexpected callback event=%d status=%d from DDL",
event, status);
dev_ctxt->command_continue = false;
break;
}
}
if (!VCD_FAILED(rc) && ((event == VCD_EVT_RESP_INPUT_DONE ||
event == VCD_EVT_RESP_OUTPUT_DONE) ||
event == VCD_EVT_RESP_OUTPUT_REQ ||
event == VCD_EVT_IND_OUTPUT_RECONFIG)) {
if (payload &&
((struct ddl_frame_data_tag *)\
payload)->frm_trans_end) {
vcd_mark_frame_channel(cctxt->dev_ctxt);
frm_trans_end = true;
}
if (frm_trans_end && !cctxt->status.frame_submitted) {
VCD_MSG_HIGH
("All pending frames recvd from DDL");
vcd_flush_bframe_buffers(cctxt,
VCD_FLUSH_INPUT);
vcd_flush_output_buffers(cctxt);
cctxt->status.mask &= ~VCD_FLUSH_ALL;
vcd_release_all_clnt_frm_transc(cctxt);
VCD_MSG_HIGH
("All buffers flushed. Enqueuing stop cmd");
vcd_client_cmd_flush_and_en_q(cctxt,
VCD_CMD_CODEC_STOP);
}
}
}
static void vcd_clnt_cb_in_pausing
(struct vcd_clnt_ctxt *cctxt,
u32 event,
u32 status,
void *payload, size_t sz, u32 *ddl_handle, void *const client_data)
{
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
u32 rc = VCD_S_SUCCESS;
u32 frm_trans_end = false;
VCD_MSG_LOW("vcd_clnt_cb_in_pausing:");
if (cctxt->ddl_handle != ddl_handle) {
VCD_MSG_ERROR("ddl_handle mismatch");
return;
}
switch (event) {
case VCD_EVT_RESP_INPUT_DONE:
{
rc = vcd_handle_input_done(cctxt, payload, event,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_DONE:
{
rc = vcd_handle_frame_done(cctxt, payload, event,
status);
break;
}
case VCD_EVT_RESP_OUTPUT_REQ:
{
rc = vcd_handle_output_required(cctxt, payload,
status);
break;
}
case VCD_EVT_IND_OUTPUT_RECONFIG:
{
rc = vcd_handle_ind_output_reconfig(cctxt,
payload, status);
if (!VCD_FAILED(rc)) {
frm_trans_end = true;
payload = NULL;
}
break;
}
case VCD_EVT_IND_HWERRFATAL:
{
vcd_handle_ind_hw_err_fatal(cctxt,
VCD_EVT_RESP_PAUSE, status);
rc = VCD_ERR_FAIL;
break;
}
default:
{
VCD_MSG_ERROR
("Unexpected callback event=%d status=%d from DDL",
event, status);
dev_ctxt->command_continue = false;
break;
}
}
if (!VCD_FAILED(rc)) {
if (payload &&
((struct ddl_frame_data_tag *)\
payload)->frm_trans_end) {
vcd_mark_frame_channel(cctxt->dev_ctxt);
frm_trans_end = true;
}
if (frm_trans_end && !cctxt->status.frame_submitted) {
VCD_MSG_HIGH
("All pending frames recvd from DDL");
cctxt->callback(VCD_EVT_RESP_PAUSE,
VCD_S_SUCCESS,
NULL,
0,
cctxt,
cctxt->client_data);
vcd_do_client_state_transition(cctxt,
VCD_CLIENT_STATE_PAUSED,
CLIENT_STATE_EVENT_NUMBER
(clnt_cb));
rc = vcd_power_event(cctxt->dev_ctxt,
cctxt,
VCD_EVT_PWR_CLNT_PAUSE);
if (VCD_FAILED(rc)) {
VCD_MSG_ERROR
("VCD_EVT_PWR_CLNT_PAUSE_END"
"failed");
}
}
}
}
static void vcd_clnt_cb_in_invalid(
struct vcd_clnt_ctxt *cctxt, u32 event, u32 status,
void *payload, size_t sz, u32 *ddl_handle,
void *const client_data
)
{
struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt;
VCD_MSG_LOW("vcd_clnt_cb_in_invalid:");
if (cctxt->ddl_handle != ddl_handle) {
VCD_MSG_ERROR("ddl_handle mismatch");
return;
}
switch (event) {
case VCD_EVT_RESP_STOP:
{
vcd_handle_stop_done_in_invalid(cctxt,
(struct vcd_transc *)client_data,
status);
break;
}
case VCD_EVT_RESP_INPUT_DONE:
case VCD_EVT_RESP_OUTPUT_REQ:
{
if (cctxt->status.frame_submitted)
cctxt->status.frame_submitted--;
if (payload && ((struct ddl_frame_data_tag *)
payload)->frm_trans_end)
vcd_mark_frame_channel(cctxt->dev_ctxt);
break;
}
case VCD_EVT_RESP_OUTPUT_DONE:
{
if (payload && ((struct ddl_frame_data_tag *)
payload)->frm_trans_end)
vcd_mark_frame_channel(cctxt->dev_ctxt);
break;
}
case VCD_EVT_RESP_TRANSACTION_PENDING:
{
if (cctxt->status.frame_submitted)
cctxt->status.frame_submitted--;
vcd_mark_frame_channel(cctxt->dev_ctxt);
break;
}
case VCD_EVT_IND_HWERRFATAL:
{
if (status == VCD_ERR_HW_FATAL)
vcd_handle_stop_done_in_invalid(cctxt,
(struct vcd_transc *)client_data,
status);
break;
}
case VCD_EVT_RESP_EOS_DONE:
{
vcd_mark_frame_channel(cctxt->dev_ctxt);
break;
}
case VCD_EVT_IND_OUTPUT_RECONFIG:
{
if (cctxt->status.frame_submitted > 0)
cctxt->status.frame_submitted--;
else
cctxt->status.frame_delayed--;
vcd_mark_frame_channel(cctxt->dev_ctxt);
break;
}
default:
{
VCD_MSG_ERROR("Unexpected callback event=%d status=%d"
"from DDL", event, status);
dev_ctxt->command_continue = false;
break;
}
}
}
static void vcd_clnt_enter_open
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Entering CLIENT_STATE_OPEN on api %d", state_event);
}
static void vcd_clnt_enter_starting
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Entering CLIENT_STATE_STARTING on api %d",
state_event);
cctxt->status.last_evt = VCD_EVT_RESP_START;
}
static void vcd_clnt_enter_run
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Entering CLIENT_STATE_RUN on api %d", state_event);
}
static void vcd_clnt_enter_flushing
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Entering CLIENT_STATE_FLUSHING on api %d",
state_event);
}
static void vcd_clnt_enter_stopping
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Entering CLIENT_STATE_STOPPING on api %d",
state_event);
cctxt->status.last_evt = VCD_EVT_RESP_STOP;
}
static void vcd_clnt_enter_eos(struct vcd_clnt_ctxt *cctxt,
s32 state_event)
{
u32 rc;
VCD_MSG_MED("Entering CLIENT_STATE_EOS on api %d", state_event);
rc = vcd_sched_suspend_resume_clnt(cctxt, false);
if (VCD_FAILED(rc))
VCD_MSG_ERROR("Failed: vcd_sched_suspend_resume_clnt."
"rc=0x%x", rc);
}
static void vcd_clnt_enter_pausing
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Entering CLIENT_STATE_PAUSING on api %d",
state_event);
cctxt->status.last_evt = VCD_EVT_RESP_PAUSE;
}
static void vcd_clnt_enter_paused
(struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
VCD_MSG_MED("Entering CLIENT_STATE_PAUSED on api %d",
state_event);
}
static void vcd_clnt_enter_invalid(struct vcd_clnt_ctxt *cctxt,
s32 state_event)
{
VCD_MSG_MED("Entering CLIENT_STATE_INVALID on api %d",
state_event);
cctxt->ddl_hdl_valid = false;
cctxt->status.mask &= ~(VCD_FIRST_IP_RCVD | VCD_FIRST_OP_RCVD);
if (cctxt->sched_clnt_hdl)
vcd_sched_suspend_resume_clnt(cctxt, false);
}
static void vcd_clnt_exit_open
(struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
VCD_MSG_MED("Exiting CLIENT_STATE_OPEN on api %d", state_event);
}
static void vcd_clnt_exit_starting
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Exiting CLIENT_STATE_STARTING on api %d",
state_event);
cctxt->status.last_evt = VCD_EVT_RESP_BASE;
}
static void vcd_clnt_exit_run
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Exiting CLIENT_STATE_RUN on api %d", state_event);
}
static void vcd_clnt_exit_flushing
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Exiting CLIENT_STATE_FLUSHING on api %d",
state_event);
}
static void vcd_clnt_exit_stopping
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Exiting CLIENT_STATE_STOPPING on api %d",
state_event);
cctxt->status.last_evt = VCD_EVT_RESP_BASE;
}
static void vcd_clnt_exit_eos
(struct vcd_clnt_ctxt *cctxt, s32 state_event)
{
u32 rc;
VCD_MSG_MED("Exiting CLIENT_STATE_EOS on api %d", state_event);
rc = vcd_sched_suspend_resume_clnt(cctxt, true);
if (VCD_FAILED(rc))
VCD_MSG_ERROR("Failed: vcd_sched_suspend_resume_clnt. rc=0x%x",
rc);
}
static void vcd_clnt_exit_pausing
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Exiting CLIENT_STATE_PAUSING on api %d",
state_event);
cctxt->status.last_evt = VCD_EVT_RESP_BASE;
}
static void vcd_clnt_exit_paused
(struct vcd_clnt_ctxt *cctxt, s32 state_event) {
VCD_MSG_MED("Exiting CLIENT_STATE_PAUSED on api %d",
state_event);
}
static void vcd_clnt_exit_invalid(struct vcd_clnt_ctxt *cctxt,
s32 state_event)
{
VCD_MSG_MED("Exiting CLIENT_STATE_INVALID on api %d",
state_event);
}
void vcd_do_client_state_transition(struct vcd_clnt_ctxt *cctxt,
enum vcd_clnt_state_enum to_state, u32 ev_code)
{
struct vcd_clnt_state_ctxt *state_ctxt;
if (!cctxt || to_state >= VCD_CLIENT_STATE_MAX) {
VCD_MSG_ERROR("Bad parameters. cctxt=%p, to_state=%d",
cctxt, to_state);
return;
}
state_ctxt = &cctxt->clnt_state;
if (state_ctxt->state == to_state) {
VCD_MSG_HIGH("Client already in requested to_state=%d",
to_state);
return;
}
VCD_MSG_MED("vcd_do_client_state_transition: C%d -> C%d, for api %d",
(int)state_ctxt->state, (int)to_state, ev_code);
if (state_ctxt->state_table->exit)
state_ctxt->state_table->exit(cctxt, ev_code);
state_ctxt->state = to_state;
state_ctxt->state_table = vcd_clnt_state_table[to_state];
if (state_ctxt->state_table->entry)
state_ctxt->state_table->entry(cctxt, ev_code);
}
const struct vcd_clnt_state_table *vcd_get_client_state_table
(enum vcd_clnt_state_enum state) {
return vcd_clnt_state_table[state];
}
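/*
* Each state table below lists the client API handlers allowed in one
* client state. Judging from the handler names, the slots are, in order:
* close, encode_start, encode_frame, decode_start, decode_frame, pause,
* resume, flush, stop, set_property, get_property,
* set_buffer_requirements, get_buffer_requirements, set_buffer,
* allocate_buffer, free_buffer, fill_output_buffer and the DDL callback.
* A NULL slot means the call is not permitted in that state.
*/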
static const struct vcd_clnt_state_table vcd_clnt_table_open = {
{
vcd_close_in_open,
vcd_encode_start_in_open,
NULL,
vcd_decode_start_in_open,
NULL,
NULL,
NULL,
vcd_flush_inopen,
vcd_stop_inopen,
vcd_set_property_cmn,
vcd_get_property_cmn,
vcd_set_buffer_requirements_cmn,
vcd_get_buffer_requirements_cmn,
vcd_set_buffer_cmn,
vcd_allocate_buffer_cmn,
vcd_free_buffer_cmn,
vcd_fill_output_buffer_cmn,
NULL,
},
vcd_clnt_enter_open,
vcd_clnt_exit_open
};
static const struct vcd_clnt_state_table vcd_clnt_table_starting = {
{
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
vcd_get_property_cmn,
NULL,
vcd_get_buffer_requirements_cmn,
NULL,
NULL,
NULL,
NULL,
vcd_clnt_cb_in_starting,
},
vcd_clnt_enter_starting,
vcd_clnt_exit_starting
};
static const struct vcd_clnt_state_table vcd_clnt_table_run = {
{
NULL,
vcd_encode_start_in_run,
vcd_encode_frame_cmn,
vcd_decode_start_in_run,
vcd_decode_frame_cmn,
vcd_pause_in_run,
NULL,
vcd_flush_cmn,
vcd_stop_in_run,
vcd_set_property_cmn,
vcd_get_property_cmn,
vcd_set_buffer_requirements_cmn,
vcd_get_buffer_requirements_cmn,
vcd_set_buffer_cmn,
vcd_allocate_buffer_cmn,
vcd_free_buffer_cmn,
vcd_fill_output_buffer_cmn,
vcd_clnt_cb_in_run,
},
vcd_clnt_enter_run,
vcd_clnt_exit_run
};
static const struct vcd_clnt_state_table vcd_clnt_table_flushing = {
{
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
vcd_flush_in_flushing,
NULL,
vcd_set_property_cmn,
vcd_get_property_cmn,
NULL,
vcd_get_buffer_requirements_cmn,
NULL,
NULL,
NULL,
vcd_fill_output_buffer_cmn,
vcd_clnt_cb_in_flushing,
},
vcd_clnt_enter_flushing,
vcd_clnt_exit_flushing
};
static const struct vcd_clnt_state_table vcd_clnt_table_stopping = {
{
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
vcd_get_property_cmn,
NULL,
vcd_get_buffer_requirements_cmn,
NULL,
NULL,
NULL,
NULL,
vcd_clnt_cb_in_stopping,
},
vcd_clnt_enter_stopping,
vcd_clnt_exit_stopping
};
static const struct vcd_clnt_state_table vcd_clnt_table_eos = {
{
NULL,
NULL,
vcd_encode_frame_cmn,
NULL,
vcd_decode_frame_cmn,
NULL,
NULL,
vcd_flush_in_eos,
vcd_stop_in_eos,
NULL,
vcd_get_property_cmn,
NULL,
vcd_get_buffer_requirements_cmn,
NULL,
NULL,
NULL,
vcd_fill_output_buffer_in_eos,
vcd_clnt_cb_in_eos,
},
vcd_clnt_enter_eos,
vcd_clnt_exit_eos
};
static const struct vcd_clnt_state_table vcd_clnt_table_pausing = {
{
NULL,
NULL,
vcd_encode_frame_cmn,
NULL,
vcd_decode_frame_cmn,
NULL,
NULL,
NULL,
NULL,
vcd_set_property_cmn,
vcd_get_property_cmn,
NULL,
vcd_get_buffer_requirements_cmn,
NULL,
NULL,
NULL,
vcd_fill_output_buffer_cmn,
vcd_clnt_cb_in_pausing,
},
vcd_clnt_enter_pausing,
vcd_clnt_exit_pausing
};
static const struct vcd_clnt_state_table vcd_clnt_table_paused = {
{
NULL,
NULL,
vcd_encode_frame_cmn,
NULL,
vcd_decode_frame_cmn,
NULL,
vcd_resume_in_paused,
vcd_flush_cmn,
vcd_stop_cmn,
vcd_set_property_cmn,
vcd_get_property_cmn,
vcd_set_buffer_requirements_cmn,
vcd_get_buffer_requirements_cmn,
vcd_set_buffer_cmn,
vcd_allocate_buffer_cmn,
NULL,
vcd_fill_output_buffer_cmn,
NULL,
},
vcd_clnt_enter_paused,
vcd_clnt_exit_paused
};
static const struct vcd_clnt_state_table vcd_clnt_table_invalid = {
{
vcd_close_in_invalid,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
vcd_flush_in_invalid,
vcd_stop_in_invalid,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
vcd_free_buffer_cmn,
NULL,
vcd_clnt_cb_in_invalid,
},
vcd_clnt_enter_invalid,
vcd_clnt_exit_invalid
};
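/* Lookup array used by vcd_do_client_state_transition(); it appears to be
indexed by enum vcd_clnt_state_enum, with index 0 (no state) left empty. */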
static const struct vcd_clnt_state_table *vcd_clnt_state_table[] = {
NULL,
&vcd_clnt_table_open,
&vcd_clnt_table_starting,
&vcd_clnt_table_run,
&vcd_clnt_table_flushing,
&vcd_clnt_table_pausing,
&vcd_clnt_table_paused,
&vcd_clnt_table_stopping,
&vcd_clnt_table_eos,
&vcd_clnt_table_invalid
};
| gpl-2.0 |
lgeek/linux-tronsmart-orion-r28 | drivers/headset_observe/rk_headset.c | 12 | 14176 | /* arch/arm/mach-rockchip/rk28_headset.c
*
* Copyright (C) 2009 Rockchip Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/switch.h>
#include <linux/input.h>
#include <linux/debugfs.h>
#include <linux/wakelock.h>
#include <asm/gpio.h>
#include <asm/atomic.h>
#include <asm/mach-types.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include "rk_headset.h"
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
/* Debug */
#if 0
#define DBG(x...) printk(x)
#else
#define DBG(x...) do { } while (0)
#endif
#define BIT_HEADSET (1 << 0)
#define BIT_HEADSET_NO_MIC (1 << 1)
#define HEADSET 0
#define HOOK 1
#define HEADSET_IN 1
#define HEADSET_OUT 0
#define HOOK_DOWN 1
#define HOOK_UP 0
#define enable 1
#define disable 0
#if defined(CONFIG_SND_RK_SOC_RK2928) || defined(CONFIG_SND_RK29_SOC_RK610)
extern void rk2928_codec_set_spk(bool on);
#endif
#ifdef CONFIG_SND_SOC_WM8994
extern int wm8994_set_status(void);
#endif
/* headset private data */
struct headset_priv {
struct input_dev *input_dev;
struct rk_headset_pdata *pdata;
unsigned int headset_status:1;
unsigned int hook_status:1;
unsigned int isMic:1;
unsigned int isHook_irq:1;
int cur_headset_status;
unsigned int irq[2];
unsigned int irq_type[2];
struct delayed_work h_delayed_work[2];
struct switch_dev sdev;
struct mutex mutex_lock[2];
struct timer_list headset_timer;
unsigned char *keycodes;
};
static struct headset_priv *headset_info;
#ifdef CONFIG_MODEM_MIC_SWITCH
#define HP_MIC 0
#define MAIN_MIC 1
void Modem_Mic_switch(int value)
{
if(value == HP_MIC)
gpio_set_value(headset_info->pdata->mic_switch_gpio, headset_info->pdata->hp_mic_io_value);
else if(value == MAIN_MIC)
gpio_set_value(headset_info->pdata->mic_switch_gpio,headset_info->pdata->main_mic_io_value);
}
void Modem_Mic_release(void)
{
if(headset_info->cur_headset_status == 1)
gpio_set_value(headset_info->pdata->mic_switch_gpio, headset_info->pdata->hp_mic_io_value);
else
gpio_set_value(headset_info->pdata->mic_switch_gpio,headset_info->pdata->main_mic_io_value);
}
#endif
static int read_gpio(int gpio)
{
int i,level;
for(i=0; i<3; i++)
{
level = gpio_get_value(gpio);
if(level < 0)
{
printk("%s:get pin level again,pin=%d,i=%d\n",__FUNCTION__,gpio,i);
msleep(1);
continue;
}
else
break;
}
if(level < 0)
printk("%s:get pin level err!\n",__FUNCTION__);
return level;
}
static irqreturn_t headset_interrupt(int irq, void *dev_id)
{
DBG("---headset_interrupt---\n");
schedule_delayed_work(&headset_info->h_delayed_work[HEADSET], msecs_to_jiffies(50));
return IRQ_HANDLED;
}
static irqreturn_t hook_interrupt(int irq, void *dev_id)
{
DBG("---Hook_interrupt---\n");
// disable_irq_nosync(headset_info->irq[HOOK]);
schedule_delayed_work(&headset_info->h_delayed_work[HOOK], msecs_to_jiffies(100));
return IRQ_HANDLED;
}
static void headsetobserve_work(struct work_struct *work)
{
int level = 0;
int level2 = 0;
struct rk_headset_pdata *pdata = headset_info->pdata;
static unsigned int old_status = 0;
DBG("---headsetobserve_work---\n");
mutex_lock(&headset_info->mutex_lock[HEADSET]);
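/* Debounce the jack-detect GPIO: sample it twice, 100ms apart, and bail
out if either read fails or the two samples disagree. */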
level = read_gpio(pdata->headset_gpio);
if(level < 0)
goto out;
msleep(100);
level2 = read_gpio(pdata->headset_gpio);
if(level2 < 0)
goto out;
if(level2 != level)
goto out;
old_status = headset_info->headset_status;
if(pdata->headset_insert_type == HEADSET_IN_HIGH)
headset_info->headset_status = level?HEADSET_IN:HEADSET_OUT;
else
headset_info->headset_status = level?HEADSET_OUT:HEADSET_IN;
if(old_status == headset_info->headset_status) {
DBG("old_status == headset_info->headset_status\n");
goto out;
}
DBG("(headset in is %s)headset status is %s\n",
pdata->headset_insert_type?"high level":"low level",
headset_info->headset_status?"in":"out");
if(headset_info->headset_status == HEADSET_IN)
{
headset_info->cur_headset_status = BIT_HEADSET_NO_MIC;
if(pdata->headset_insert_type == HEADSET_IN_HIGH)
irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_FALLING);
else
irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_RISING);
if (pdata->hook_gpio) {
del_timer(&headset_info->headset_timer);//Restart the timer and wait for the codec to switch to the headphone channel
headset_info->headset_timer.expires = jiffies + 100;
add_timer(&headset_info->headset_timer);
goto out;
}
}
else if(headset_info->headset_status == HEADSET_OUT)
{
headset_info->hook_status = HOOK_UP;
if(headset_info->isHook_irq == enable)
{
DBG("disable headset_hook irq\n");
headset_info->isHook_irq = disable;
disable_irq(headset_info->irq[HOOK]);
}
headset_info->cur_headset_status = 0;//~(BIT_HEADSET|BIT_HEADSET_NO_MIC);
//#if defined(CONFIG_SND_RK_SOC_RK2928) || defined(CONFIG_SOC_RK3028)
//rk2928_codec_set_spk(HEADSET_OUT);
//#endif
if(pdata->headset_insert_type == HEADSET_IN_HIGH)
irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_RISING);
else
irq_set_irq_type(headset_info->irq[HEADSET],IRQF_TRIGGER_FALLING);
}
// rk28_send_wakeup_key();
switch_set_state(&headset_info->sdev, headset_info->cur_headset_status);
#if defined(CONFIG_SND_RK_SOC_RK2928) || defined(CONFIG_SND_RK29_SOC_RK610)
if (headset_info->headset_status == HEADSET_OUT)
{
mdelay(200);
rk2928_codec_set_spk(HEADSET_OUT);
gpio_set_value(pdata->Sw_mic_gpio, headset_info->pdata->Main_mic_io_value);
}
#endif
DBG("headset_info->cur_headset_status = %d\n",headset_info->cur_headset_status);
out:
mutex_unlock(&headset_info->mutex_lock[HEADSET]);
}
static void hook_work(struct work_struct *work)
{
int level = 0;
struct rk_headset_pdata *pdata = headset_info->pdata;
static unsigned int old_status = HOOK_UP;
mutex_lock(&headset_info->mutex_lock[HOOK]);
if(headset_info->headset_status == HEADSET_OUT){
DBG("Headset is out\n");
goto RE_ERROR;
}
#ifdef CONFIG_SND_SOC_WM8994
if(wm8994_set_status() != 0) {
DBG("wm8994 is not set on heatset channel or suspend\n");
goto RE_ERROR;
}
#endif
level = read_gpio(pdata->hook_gpio);
if(level < 0)
goto RE_ERROR;
old_status = headset_info->hook_status;
DBG("Hook_work -- level = %d\n",level);
if(level == 0)
headset_info->hook_status = pdata->hook_down_type == HOOK_DOWN_HIGH?HOOK_UP:HOOK_DOWN;
else if(level > 0)
headset_info->hook_status = pdata->hook_down_type == HOOK_DOWN_HIGH?HOOK_DOWN:HOOK_UP;
if(old_status == headset_info->hook_status)
{
DBG("old_status == headset_info->hook_status\n");
goto RE_ERROR;
}
DBG("Hook_work -- level = %d hook status is %s\n",level,headset_info->hook_status?"key down":"key up");
if(headset_info->hook_status == HOOK_DOWN)
{
if(pdata->hook_down_type == HOOK_DOWN_HIGH)
irq_set_irq_type(headset_info->irq[HOOK],IRQF_TRIGGER_FALLING);
else
irq_set_irq_type(headset_info->irq[HOOK],IRQF_TRIGGER_RISING);
}
else
{
if(pdata->hook_down_type == HOOK_DOWN_HIGH)
irq_set_irq_type(headset_info->irq[HOOK],IRQF_TRIGGER_RISING);
else
irq_set_irq_type(headset_info->irq[HOOK],IRQF_TRIGGER_FALLING);
}
input_report_key(headset_info->input_dev,HOOK_KEY_CODE,headset_info->hook_status);
input_sync(headset_info->input_dev);
RE_ERROR:
mutex_unlock(&headset_info->mutex_lock[HOOK]);
}
static void headset_timer_callback(unsigned long arg)
{
struct headset_priv *headset = (struct headset_priv *)(arg);
struct rk_headset_pdata *pdata = headset->pdata;
int level = 0;
// printk("headset_timer_callback,headset->headset_status=%d\n",headset->headset_status);
if(headset->headset_status == HEADSET_OUT)
{
printk("Headset is out\n");
goto out;
}
#ifdef CONFIG_SND_SOC_WM8994
if(wm8994_set_status() != 0)
{
// printk("wait wm8994 set the MICB2\n");
// headset_info->headset_timer.expires = jiffies + 500;
headset_info->headset_timer.expires = jiffies + 10;
add_timer(&headset_info->headset_timer);
goto out;
}
#endif
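/* Probe the hook-detect GPIO: if it idles at the "key up" polarity the
headset has a microphone, otherwise treat it as a mic-less headphone. */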
level = read_gpio(pdata->hook_gpio);
if(level < 0)
goto out;
if((level > 0 && pdata->hook_down_type == HOOK_DOWN_LOW)
|| (level == 0 && pdata->hook_down_type == HOOK_DOWN_HIGH))
{
headset->isMic = 1;//have mic
DBG("enable headset_hook irq\n");
enable_irq(headset_info->irq[HOOK]);
headset->isHook_irq = enable;
headset_info->hook_status = HOOK_UP;
if(pdata->hook_down_type == HOOK_DOWN_HIGH)
irq_set_irq_type(headset_info->irq[HOOK],IRQF_TRIGGER_RISING);
else
irq_set_irq_type(headset_info->irq[HOOK],IRQF_TRIGGER_FALLING);
}
else
headset->isMic = 0;//No microphone
printk("headset->isMic = %d\n",headset->isMic);
headset_info->cur_headset_status = headset_info->isMic ? BIT_HEADSET : BIT_HEADSET_NO_MIC;
#if defined(CONFIG_SND_RK_SOC_RK2928) || defined(CONFIG_SND_RK29_SOC_RK610)
rk2928_codec_set_spk(HEADSET_IN);
if(headset_info->cur_headset_status == 1)
gpio_set_value(pdata->Sw_mic_gpio, pdata->Hp_mic_io_value);
#endif
// rk28_send_wakeup_key();
switch_set_state(&headset_info->sdev, headset_info->cur_headset_status);
DBG("headset_info->cur_headset_status = %d\n",headset_info->cur_headset_status);
out:
return;
}
static ssize_t h2w_print_name(struct switch_dev *sdev, char *buf)
{
return sprintf(buf, "Headset\n");
}
#ifdef CONFIG_HAS_EARLYSUSPEND
static void headset_early_resume(struct early_suspend *h)
{
schedule_delayed_work(&headset_info->h_delayed_work[HEADSET], msecs_to_jiffies(10));
//DBG(">>>>>headset_early_resume\n");
}
static struct early_suspend hs_early_suspend;
#endif
static int rk_hskey_open(struct input_dev *dev)
{
//struct rk28_adckey *adckey = input_get_drvdata(dev);
// DBG("===========rk_Hskey_open===========\n");
return 0;
}
static void rk_hskey_close(struct input_dev *dev)
{
// DBG("===========rk_Hskey_close===========\n");
// struct rk28_adckey *adckey = input_get_drvdata(dev);
}
int rk_headset_probe(struct platform_device *pdev,struct rk_headset_pdata *pdata)
{
int ret;
struct headset_priv *headset;
headset = kzalloc(sizeof(struct headset_priv), GFP_KERNEL);
if (headset == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
headset->pdata = pdata;
headset->headset_status = HEADSET_OUT;
headset->hook_status = HOOK_UP;
headset->isHook_irq = disable;
headset->cur_headset_status = 0;
headset->sdev.name = "h2w";
headset->sdev.print_name = h2w_print_name;
ret = switch_dev_register(&headset->sdev);
if (ret < 0)
goto failed_free;
mutex_init(&headset->mutex_lock[HEADSET]);
mutex_init(&headset->mutex_lock[HOOK]);
INIT_DELAYED_WORK(&headset->h_delayed_work[HEADSET], headsetobserve_work);
INIT_DELAYED_WORK(&headset->h_delayed_work[HOOK], hook_work);
headset->isMic = 0;
setup_timer(&headset->headset_timer, headset_timer_callback, (unsigned long)headset);
//------------------------------------------------------------------
// Create and register the input driver.
headset->input_dev = input_allocate_device();
if (!headset->input_dev) {
dev_err(&pdev->dev, "failed to allocate input device\n");
ret = -ENOMEM;
goto failed_free;
}
headset->input_dev->name = pdev->name;
headset->input_dev->open = rk_hskey_open;
headset->input_dev->close = rk_hskey_close;
headset->input_dev->dev.parent = &pdev->dev;
//input_dev->phys = KEY_PHYS_NAME;
headset->input_dev->id.vendor = 0x0001;
headset->input_dev->id.product = 0x0001;
headset->input_dev->id.version = 0x0100;
// Register the input device
ret = input_register_device(headset->input_dev);
if (ret) {
dev_err(&pdev->dev, "failed to register input device\n");
goto failed_free_dev;
}
input_set_capability(headset->input_dev, EV_KEY,HOOK_KEY_CODE);
#ifdef CONFIG_HAS_EARLYSUSPEND
hs_early_suspend.suspend = NULL;
hs_early_suspend.resume = headset_early_resume;
hs_early_suspend.level = ~0x0;
register_early_suspend(&hs_early_suspend);
#endif
//------------------------------------------------------------------
if (pdata->headset_gpio) {
if(!pdata->headset_gpio){
dev_err(&pdev->dev,"failed init headset,please full hook_io_init function in board\n");
goto failed_free_dev;
}
headset->irq[HEADSET] = gpio_to_irq(pdata->headset_gpio);
if(pdata->headset_insert_type == HEADSET_IN_HIGH)
headset->irq_type[HEADSET] = IRQF_TRIGGER_RISING;
else
headset->irq_type[HEADSET] = IRQF_TRIGGER_FALLING;
ret = request_irq(headset->irq[HEADSET], headset_interrupt, headset->irq_type[HEADSET], "headset_input", NULL);
if (ret)
goto failed_free_dev;
enable_irq_wake(headset->irq[HEADSET]);
}
else
goto failed_free_dev;
//------------------------------------------------------------------
if (pdata->hook_gpio) {
headset->irq[HOOK] = gpio_to_irq(pdata->hook_gpio);
headset->irq_type[HOOK] = pdata->hook_down_type == HOOK_DOWN_HIGH ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
ret = request_irq(headset->irq[HOOK], hook_interrupt, headset->irq_type[HOOK] , "headset_hook", NULL);
if (ret)
goto failed_free_dev;
disable_irq(headset->irq[HOOK]);
}
//------------------------------------------------------------------
headset_info = headset;
schedule_delayed_work(&headset->h_delayed_work[HEADSET], msecs_to_jiffies(500));
return 0;
failed_free_dev:
platform_set_drvdata(pdev, NULL);
input_free_device(headset->input_dev);
failed_free:
dev_err(&pdev->dev, "failed to headset probe\n");
kfree(headset);
return ret;
}
| gpl-2.0 |
zhoupeng/spice4xen | linux-2.6.18-xen.hg/drivers/usb/net/plusb.c | 12 | 4437 | /*
* PL-2301/2302 USB host-to-host link cables
* Copyright (C) 2000-2005 by David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include "usbnet.h"
/*
* Prolific PL-2301/PL-2302 driver ... http://www.prolifictech.com
*
* The protocol and handshaking used here should be bug-compatible
* with the Linux 2.2 "plusb" driver, by Deti Fliegl.
*
* HEADS UP: this handshaking isn't all that robust. This driver
* gets confused easily if you unplug one end of the cable then
* try to connect it again; you'll need to restart both ends. The
* "naplink" software (used by some PlayStation/2 deveopers) does
* the handshaking much better! Also, sometimes this hardware
* seems to get wedged under load. Prolific docs are weak, and
* don't identify differences between PL2301 and PL2302, much less
* anything to explain the different PL2302 versions observed.
*/
/*
* Bits 0-4 can be used for software handshaking; they're set from
* one end, cleared from the other, "read" with the interrupt byte.
*/
#define PL_S_EN (1<<7) /* (feature only) suspend enable */
/* reserved bit -- rx ready (6) ? */
#define PL_TX_READY (1<<5) /* (interrupt only) transmit ready */
#define PL_RESET_OUT (1<<4) /* reset output pipe */
#define PL_RESET_IN (1<<3) /* reset input pipe */
#define PL_TX_C (1<<2) /* transmission complete */
#define PL_TX_REQ (1<<1) /* transmission received */
#define PL_PEER_E (1<<0) /* peer exists */
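/*
 * Illustrative sketch only (not part of the original driver): the link
 * state could be derived from the status byte delivered on the interrupt
 * endpoint by testing the bits defined above, for example:
 */
#if 0
static inline int pl_peer_present(u8 intr_status)
{
return (intr_status & PL_PEER_E) != 0;
}
#endif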
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
return usb_control_msg(dev->udev,
usb_rcvctrlpipe(dev->udev, 0),
req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
val, index,
NULL, 0,
USB_CTRL_GET_TIMEOUT);
}
static inline int
pl_clear_QuickLink_features(struct usbnet *dev, int val)
{
return pl_vendor_req(dev, 1, (u8) val, 0);
}
static inline int
pl_set_QuickLink_features(struct usbnet *dev, int val)
{
return pl_vendor_req(dev, 3, (u8) val, 0);
}
static int pl_reset(struct usbnet *dev)
{
/* some units seem to need this reset, others reject it utterly.
* FIXME be more like "naplink" or windows drivers.
*/
(void) pl_set_QuickLink_features(dev,
PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E);
return 0;
}
static const struct driver_info prolific_info = {
.description = "Prolific PL-2301/PL-2302",
.flags = FLAG_NO_SETINT,
/* some PL-2302 versions seem to fail usb_set_interface() */
.reset = pl_reset,
};
/*-------------------------------------------------------------------------*/
/*
 * Prolific's name won't normally be on the cables, and
* may not be on the device.
*/
static const struct usb_device_id products [] = {
{
USB_DEVICE(0x067b, 0x0000), // PL-2301
.driver_info = (unsigned long) &prolific_info,
}, {
USB_DEVICE(0x067b, 0x0001), // PL-2302
.driver_info = (unsigned long) &prolific_info,
},
{ }, // END
};
MODULE_DEVICE_TABLE(usb, products);
static struct usb_driver plusb_driver = {
.name = "plusb",
.id_table = products,
.probe = usbnet_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
};
static int __init plusb_init(void)
{
return usb_register(&plusb_driver);
}
module_init(plusb_init);
static void __exit plusb_exit(void)
{
usb_deregister(&plusb_driver);
}
module_exit(plusb_exit);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("Prolific PL-2301/2302 USB Host to Host Link Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
siley/deffender-fun-3.3.5a | src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/instance_trial_of_the_crusader.cpp | 12 | 32701 | /*
* Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ScriptMgr.h"
#include "InstanceScript.h"
#include "trial_of_the_crusader.h"
#include "Player.h"
#include "TemporarySummon.h"
class instance_trial_of_the_crusader : public InstanceMapScript
{
public:
instance_trial_of_the_crusader() : InstanceMapScript("instance_trial_of_the_crusader", 649) { }
struct instance_trial_of_the_crusader_InstanceMapScript : public InstanceScript
{
instance_trial_of_the_crusader_InstanceMapScript(Map* map) : InstanceScript(map) { }
void Initialize() override
{
SetBossNumber(MAX_ENCOUNTERS);
TrialCounter = 50;
EventStage = 0;
NorthrendBeasts = NOT_STARTED;
EventTimer = 1000;
NotOneButTwoJormungarsTimer = 0;
ResilienceWillFixItTimer = 0;
SnoboldCount = 0;
MistressOfPainCount = 0;
TributeToImmortalityEligible = true;
NeedSave = false;
TirionFordringGUID = 0;
BarrentGUID = 0;
TirionGUID = 0;
FizzlebangGUID = 0;
GarroshGUID = 0;
VarianGUID = 0;
GormokGUID = 0;
AcidmawGUID = 0;
DreadscaleGUID = 0;
IcehowlGUID = 0;
JaraxxusGUID = 0;
ChampionsControllerGUID = 0;
DarkbaneGUID = 0;
LightbaneGUID = 0;
AnubarakGUID = 0;
TributeChestGUID = 0;
MainGateDoorGUID = 0;
EastPortcullisGUID = 0;
WebDoorGUID = 0;
CrusadersCacheGUID = 0;
FloorGUID = 0;
}
bool IsEncounterInProgress() const override
{
for (uint8 i = 0; i < MAX_ENCOUNTERS; ++i)
if (GetBossState(i) == IN_PROGRESS)
return true;
// SPECIAL state is set for Faction Champions once the first champion dies; the encounter is still in combat
if (GetBossState(BOSS_CRUSADERS) == SPECIAL)
return true;
return false;
}
void OnPlayerEnter(Player* player) override
{
if (instance->IsHeroic())
{
player->SendUpdateWorldState(UPDATE_STATE_UI_SHOW, 1);
player->SendUpdateWorldState(UPDATE_STATE_UI_COUNT, GetData(TYPE_COUNTER));
}
else
player->SendUpdateWorldState(UPDATE_STATE_UI_SHOW, 0);
// make sure Anub'arak isn't missing and the floor is destroyed after a crash
if (GetBossState(BOSS_LICH_KING) == DONE && TrialCounter && GetBossState(BOSS_ANUBARAK) != DONE)
{
Creature* anubArak = ObjectAccessor::GetCreature(*player, GetData64(NPC_ANUBARAK));
if (!anubArak)
anubArak = player->SummonCreature(NPC_ANUBARAK, AnubarakLoc[0].GetPositionX(), AnubarakLoc[0].GetPositionY(), AnubarakLoc[0].GetPositionZ(), 3, TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME);
if (GameObject* floor = GameObject::GetGameObject(*player, GetData64(GO_ARGENT_COLISEUM_FLOOR)))
floor->SetDestructibleState(GO_DESTRUCTIBLE_DAMAGED);
}
}
void OpenDoor(uint64 guid)
{
if (!guid)
return;
if (GameObject* go = instance->GetGameObject(guid))
go->SetGoState(GO_STATE_ACTIVE_ALTERNATIVE);
}
void CloseDoor(uint64 guid)
{
if (!guid)
return;
if (GameObject* go = instance->GetGameObject(guid))
go->SetGoState(GO_STATE_READY);
}
void OnCreatureCreate(Creature* creature) override
{
switch (creature->GetEntry())
{
case NPC_BARRENT:
BarrentGUID = creature->GetGUID();
if (!TrialCounter)
creature->DespawnOrUnsummon();
break;
case NPC_TIRION:
TirionGUID = creature->GetGUID();
break;
case NPC_TIRION_FORDRING:
TirionFordringGUID = creature->GetGUID();
break;
case NPC_FIZZLEBANG:
FizzlebangGUID = creature->GetGUID();
break;
case NPC_GARROSH:
GarroshGUID = creature->GetGUID();
break;
case NPC_VARIAN:
VarianGUID = creature->GetGUID();
break;
case NPC_GORMOK:
GormokGUID = creature->GetGUID();
break;
case NPC_ACIDMAW:
AcidmawGUID = creature->GetGUID();
break;
case NPC_DREADSCALE:
DreadscaleGUID = creature->GetGUID();
break;
case NPC_ICEHOWL:
IcehowlGUID = creature->GetGUID();
break;
case NPC_JARAXXUS:
JaraxxusGUID = creature->GetGUID();
break;
case NPC_CHAMPIONS_CONTROLLER:
ChampionsControllerGUID = creature->GetGUID();
break;
case NPC_DARKBANE:
DarkbaneGUID = creature->GetGUID();
break;
case NPC_LIGHTBANE:
LightbaneGUID = creature->GetGUID();
break;
case NPC_ANUBARAK:
AnubarakGUID = creature->GetGUID();
break;
default:
break;
}
}
void OnGameObjectCreate(GameObject* go) override
{
switch (go->GetEntry())
{
case GO_CRUSADERS_CACHE_10:
if (instance->GetSpawnMode() == RAID_DIFFICULTY_10MAN_NORMAL)
CrusadersCacheGUID = go->GetGUID();
break;
case GO_CRUSADERS_CACHE_25:
if (instance->GetSpawnMode() == RAID_DIFFICULTY_25MAN_NORMAL)
CrusadersCacheGUID = go->GetGUID();
break;
case GO_CRUSADERS_CACHE_10_H:
if (instance->GetSpawnMode() == RAID_DIFFICULTY_10MAN_HEROIC)
CrusadersCacheGUID = go->GetGUID();
break;
case GO_CRUSADERS_CACHE_25_H:
if (instance->GetSpawnMode() == RAID_DIFFICULTY_25MAN_HEROIC)
CrusadersCacheGUID = go->GetGUID();
break;
case GO_ARGENT_COLISEUM_FLOOR:
FloorGUID = go->GetGUID();
break;
case GO_MAIN_GATE_DOOR:
MainGateDoorGUID = go->GetGUID();
break;
case GO_EAST_PORTCULLIS:
EastPortcullisGUID = go->GetGUID();
break;
case GO_WEB_DOOR:
WebDoorGUID = go->GetGUID();
break;
case GO_TRIBUTE_CHEST_10H_25:
case GO_TRIBUTE_CHEST_10H_45:
case GO_TRIBUTE_CHEST_10H_50:
case GO_TRIBUTE_CHEST_10H_99:
case GO_TRIBUTE_CHEST_25H_25:
case GO_TRIBUTE_CHEST_25H_45:
case GO_TRIBUTE_CHEST_25H_50:
case GO_TRIBUTE_CHEST_25H_99:
TributeChestGUID = go->GetGUID();
break;
default:
break;
}
}
bool SetBossState(uint32 type, EncounterState state) override
{
if (!InstanceScript::SetBossState(type, state))
return false;
switch (type)
{
case BOSS_BEASTS:
break;
case BOSS_JARAXXUS:
// Cleanup Icehowl
if (Creature* icehowl = instance->GetCreature(IcehowlGUID))
icehowl->DespawnOrUnsummon();
if (state == DONE)
EventStage = 2000;
break;
case BOSS_CRUSADERS:
// Cleanup Jaraxxus
if (Creature* jaraxxus = instance->GetCreature(JaraxxusGUID))
jaraxxus->DespawnOrUnsummon();
if (Creature* fizzlebang = instance->GetCreature(FizzlebangGUID))
fizzlebang->DespawnOrUnsummon();
switch (state)
{
case IN_PROGRESS:
ResilienceWillFixItTimer = 0;
break;
case SPECIAL: //Means the first blood
ResilienceWillFixItTimer = 60*IN_MILLISECONDS;
state = IN_PROGRESS;
break;
case DONE:
DoUpdateAchievementCriteria(ACHIEVEMENT_CRITERIA_TYPE_BE_SPELL_TARGET, SPELL_DEFEAT_FACTION_CHAMPIONS);
if (ResilienceWillFixItTimer > 0)
DoUpdateAchievementCriteria(ACHIEVEMENT_CRITERIA_TYPE_BE_SPELL_TARGET, SPELL_CHAMPIONS_KILLED_IN_MINUTE);
DoRespawnGameObject(CrusadersCacheGUID, 7*DAY);
if (GameObject* cache = instance->GetGameObject(CrusadersCacheGUID))
cache->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE);
EventStage = 3100;
break;
default:
break;
}
break;
case BOSS_VALKIRIES:
// Cleanup chest
if (GameObject* cache = instance->GetGameObject(CrusadersCacheGUID))
cache->Delete();
switch (state)
{
case FAIL:
if (GetBossState(BOSS_VALKIRIES) == NOT_STARTED)
state = NOT_STARTED;
break;
case SPECIAL:
if (GetBossState(BOSS_VALKIRIES) == SPECIAL)
state = DONE;
break;
case DONE:
if (instance->GetPlayers().getFirst()->GetSource()->GetTeam() == ALLIANCE)
EventStage = 4020;
else
EventStage = 4030;
break;
default:
break;
}
break;
case BOSS_LICH_KING:
break;
case BOSS_ANUBARAK:
switch (state)
{
case DONE:
{
EventStage = 6000;
uint32 tributeChest = 0;
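// Pick the tribute chest variant from the number of wipe-free attempts
// remaining: 50, 45-49, 25-44, or fewer than 25.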
if (instance->GetSpawnMode() == RAID_DIFFICULTY_10MAN_HEROIC)
{
if (TrialCounter >= 50)
tributeChest = GO_TRIBUTE_CHEST_10H_99;
else
{
if (TrialCounter >= 45)
tributeChest = GO_TRIBUTE_CHEST_10H_50;
else
{
if (TrialCounter >= 25)
tributeChest = GO_TRIBUTE_CHEST_10H_45;
else
tributeChest = GO_TRIBUTE_CHEST_10H_25;
}
}
}
else if (instance->GetSpawnMode() == RAID_DIFFICULTY_25MAN_HEROIC)
{
if (TrialCounter >= 50)
tributeChest = GO_TRIBUTE_CHEST_25H_99;
else
{
if (TrialCounter >= 45)
tributeChest = GO_TRIBUTE_CHEST_25H_50;
else
{
if (TrialCounter >= 25)
tributeChest = GO_TRIBUTE_CHEST_25H_45;
else
tributeChest = GO_TRIBUTE_CHEST_25H_25;
}
}
}
if (tributeChest)
if (Creature* tirion = instance->GetCreature(TirionGUID))
if (GameObject* chest = tirion->SummonGameObject(tributeChest, 805.62f, 134.87f, 142.16f, 3.27f, 0, 0, 0, 0, WEEK))
chest->SetRespawnTime(chest->GetRespawnDelay());
break;
}
default:
break;
}
break;
default:
break;
}
if (IsEncounterInProgress())
{
CloseDoor(GetData64(GO_EAST_PORTCULLIS));
CloseDoor(GetData64(GO_WEB_DOOR));
}
else
{
OpenDoor(GetData64(GO_EAST_PORTCULLIS));
OpenDoor(GetData64(GO_WEB_DOOR));
}
if (type < MAX_ENCOUNTERS)
{
TC_LOG_INFO("scripts", "[ToCr] BossState(type %u) %u = state %u;", type, GetBossState(type), state);
if (state == FAIL)
{
if (instance->IsHeroic())
{
--TrialCounter;
// decrease attempt counter at wipe
Map::PlayerList const &PlayerList = instance->GetPlayers();
for (Map::PlayerList::const_iterator itr = PlayerList.begin(); itr != PlayerList.end(); ++itr)
if (Player* player = itr->GetSource())
player->SendUpdateWorldState(UPDATE_STATE_UI_COUNT, TrialCounter);
// if there are no more attempts allowed
if (!TrialCounter)
{
if (Unit* announcer = instance->GetCreature(GetData64(NPC_BARRENT)))
announcer->ToCreature()->DespawnOrUnsummon();
if (Creature* anubArak = instance->GetCreature(GetData64(NPC_ANUBARAK)))
anubArak->DespawnOrUnsummon();
}
}
NeedSave = true;
EventStage = (type == BOSS_BEASTS ? 666 : 0);
state = NOT_STARTED;
}
if (state == DONE || NeedSave)
{
if (Unit* announcer = instance->GetCreature(GetData64(NPC_BARRENT)))
announcer->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP);
Save();
}
}
return true;
}
void SetData(uint32 type, uint32 data) override
{
switch (type)
{
case TYPE_COUNTER:
TrialCounter = data;
data = DONE;
break;
case TYPE_EVENT:
EventStage = data;
data = NOT_STARTED;
break;
case TYPE_EVENT_TIMER:
EventTimer = data;
data = NOT_STARTED;
break;
case TYPE_NORTHREND_BEASTS:
NorthrendBeasts = data;
switch (data)
{
case GORMOK_DONE:
EventStage = 200;
SetData(TYPE_NORTHREND_BEASTS, IN_PROGRESS);
break;
case SNAKES_IN_PROGRESS:
NotOneButTwoJormungarsTimer = 0;
break;
case SNAKES_SPECIAL:
NotOneButTwoJormungarsTimer = 10*IN_MILLISECONDS;
break;
case SNAKES_DONE:
if (NotOneButTwoJormungarsTimer > 0)
DoUpdateAchievementCriteria(ACHIEVEMENT_CRITERIA_TYPE_BE_SPELL_TARGET, SPELL_WORMS_KILLED_IN_10_SECONDS);
EventStage = 300;
SetData(TYPE_NORTHREND_BEASTS, IN_PROGRESS);
break;
case ICEHOWL_DONE:
EventStage = 400;
SetData(TYPE_NORTHREND_BEASTS, DONE);
SetBossState(BOSS_BEASTS, DONE);
break;
case FAIL:
SetBossState(BOSS_BEASTS, FAIL);
break;
default:
break;
}
break;
//Achievements
case DATA_SNOBOLD_COUNT:
if (data == INCREASE)
++SnoboldCount;
else if (data == DECREASE)
--SnoboldCount;
break;
case DATA_MISTRESS_OF_PAIN_COUNT:
if (data == INCREASE)
++MistressOfPainCount;
else if (data == DECREASE)
--MistressOfPainCount;
break;
case DATA_TRIBUTE_TO_IMMORTALITY_ELIGIBLE:
TributeToImmortalityEligible = false;
break;
default:
break;
}
}
uint64 GetData64(uint32 type) const override
{
switch (type)
{
case NPC_BARRENT:
return BarrentGUID;
case NPC_TIRION:
return TirionGUID;
case NPC_TIRION_FORDRING:
return TirionFordringGUID;
case NPC_FIZZLEBANG:
return FizzlebangGUID;
case NPC_GARROSH:
return GarroshGUID;
case NPC_VARIAN:
return VarianGUID;
case NPC_GORMOK:
return GormokGUID;
case NPC_ACIDMAW:
return AcidmawGUID;
case NPC_DREADSCALE:
return DreadscaleGUID;
case NPC_ICEHOWL:
return IcehowlGUID;
case NPC_JARAXXUS:
return JaraxxusGUID;
case NPC_CHAMPIONS_CONTROLLER:
return ChampionsControllerGUID;
case NPC_DARKBANE:
return DarkbaneGUID;
case NPC_LIGHTBANE:
return LightbaneGUID;
case NPC_ANUBARAK:
return AnubarakGUID;
case GO_ARGENT_COLISEUM_FLOOR:
return FloorGUID;
case GO_MAIN_GATE_DOOR:
return MainGateDoorGUID;
case GO_EAST_PORTCULLIS:
return EastPortcullisGUID;
case GO_WEB_DOOR:
return WebDoorGUID;
default:
break;
}
return 0;
}
uint32 GetData(uint32 type) const override
{
switch (type)
{
case TYPE_COUNTER:
return TrialCounter;
case TYPE_EVENT:
return EventStage;
case TYPE_NORTHREND_BEASTS:
return NorthrendBeasts;
case TYPE_EVENT_TIMER:
return EventTimer;
case TYPE_EVENT_NPC:
switch (EventStage)
{
case 110:
case 140:
case 150:
case 155:
case 200:
case 205:
case 210:
case 220:
case 300:
case 305:
case 310:
case 315:
case 400:
case 666:
case 1010:
case 1180:
case 2000:
case 2030:
case 3000:
case 3001:
case 3060:
case 3061:
case 3090:
case 3091:
case 3092:
case 3100:
case 3110:
case 4000:
case 4010:
case 4015:
case 4016:
case 4040:
case 4050:
case 5000:
case 5005:
case 5020:
case 6000:
case 6005:
case 6010:
return NPC_TIRION;
break;
case 5010:
case 5030:
case 5040:
case 5050:
case 5060:
case 5070:
case 5080:
return NPC_LICH_KING;
break;
case 120:
case 122:
case 2020:
case 3080:
case 3051:
case 3071:
case 4020:
return NPC_VARIAN;
break;
case 130:
case 132:
case 2010:
case 3050:
case 3070:
case 3081:
case 4030:
return NPC_GARROSH;
break;
case 1110:
case 1120:
case 1130:
case 1132:
case 1134:
case 1135:
case 1140:
case 1142:
case 1144:
case 1150:
return NPC_FIZZLEBANG;
break;
default:
return NPC_TIRION;
break;
};
default:
break;
}
return 0;
}
void Update(uint32 diff) override
{
if (GetData(TYPE_NORTHREND_BEASTS) == SNAKES_SPECIAL && NotOneButTwoJormungarsTimer)
{
if (NotOneButTwoJormungarsTimer <= diff)
NotOneButTwoJormungarsTimer = 0;
else
NotOneButTwoJormungarsTimer -= diff;
}
if (GetBossState(BOSS_CRUSADERS) == SPECIAL && ResilienceWillFixItTimer)
{
if (ResilienceWillFixItTimer <= diff)
ResilienceWillFixItTimer = 0;
else
ResilienceWillFixItTimer -= diff;
}
}
void Save()
{
OUT_SAVE_INST_DATA;
std::ostringstream saveStream;
for (uint8 i = 0; i < MAX_ENCOUNTERS; ++i)
saveStream << GetBossState(i) << ' ';
saveStream << TrialCounter;
SaveDataBuffer = saveStream.str();
SaveToDB();
OUT_SAVE_INST_DATA_COMPLETE;
NeedSave = false;
}
std::string GetSaveData() override
{
return SaveDataBuffer;
}
void Load(const char* strIn) override
{
if (!strIn)
{
OUT_LOAD_INST_DATA_FAIL;
return;
}
OUT_LOAD_INST_DATA(strIn);
std::istringstream loadStream(strIn);
for (uint8 i = 0; i < MAX_ENCOUNTERS; ++i)
{
uint32 tmpState;
loadStream >> tmpState;
if (tmpState == IN_PROGRESS || tmpState > SPECIAL)
tmpState = NOT_STARTED;
SetBossState(i, EncounterState(tmpState));
}
loadStream >> TrialCounter;
EventStage = 0;
OUT_LOAD_INST_DATA_COMPLETE;
}
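// The save string produced by Save() and parsed by Load() is simply the
// space-separated EncounterState of every boss followed by the remaining
// heroic attempt counter (TrialCounter).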
bool CheckAchievementCriteriaMeet(uint32 criteria_id, Player const* /*source*/, Unit const* /*target*/, uint32 /*miscvalue1*/) override
{
switch (criteria_id)
{
case UPPER_BACK_PAIN_10_PLAYER:
case UPPER_BACK_PAIN_10_PLAYER_HEROIC:
return SnoboldCount >= 2;
case UPPER_BACK_PAIN_25_PLAYER:
case UPPER_BACK_PAIN_25_PLAYER_HEROIC:
return SnoboldCount >= 4;
case THREE_SIXTY_PAIN_SPIKE_10_PLAYER:
case THREE_SIXTY_PAIN_SPIKE_10_PLAYER_HEROIC:
case THREE_SIXTY_PAIN_SPIKE_25_PLAYER:
case THREE_SIXTY_PAIN_SPIKE_25_PLAYER_HEROIC:
return MistressOfPainCount >= 2;
case A_TRIBUTE_TO_SKILL_10_PLAYER:
case A_TRIBUTE_TO_SKILL_25_PLAYER:
return TrialCounter >= 25;
case A_TRIBUTE_TO_MAD_SKILL_10_PLAYER:
case A_TRIBUTE_TO_MAD_SKILL_25_PLAYER:
return TrialCounter >= 45;
case A_TRIBUTE_TO_INSANITY_10_PLAYER:
case A_TRIBUTE_TO_INSANITY_25_PLAYER:
case REALM_FIRST_GRAND_CRUSADER:
return TrialCounter == 50;
case A_TRIBUTE_TO_IMMORTALITY_HORDE:
case A_TRIBUTE_TO_IMMORTALITY_ALLIANCE:
return TrialCounter == 50 && TributeToImmortalityEligible;
case A_TRIBUTE_TO_DEDICATED_INSANITY:
return false/*uiGrandCrusaderAttemptsLeft == 50 && !bHasAtAnyStagePlayerEquippedTooGoodItem*/;
default:
break;
}
return false;
}
protected:
uint32 TrialCounter;
uint32 EventStage;
uint32 EventTimer;
uint32 NorthrendBeasts;
bool NeedSave;
std::string SaveDataBuffer;
uint64 BarrentGUID;
uint64 TirionGUID;
uint64 TirionFordringGUID;
uint64 FizzlebangGUID;
uint64 GarroshGUID;
uint64 VarianGUID;
uint64 GormokGUID;
uint64 AcidmawGUID;
uint64 DreadscaleGUID;
uint64 IcehowlGUID;
uint64 JaraxxusGUID;
uint64 ChampionsControllerGUID;
uint64 DarkbaneGUID;
uint64 LightbaneGUID;
uint64 AnubarakGUID;
uint64 CrusadersCacheGUID;
uint64 FloorGUID;
uint64 TributeChestGUID;
uint64 MainGateDoorGUID;
uint64 EastPortcullisGUID;
uint64 WebDoorGUID;
// Achievement stuff
uint32 NotOneButTwoJormungarsTimer;
uint32 ResilienceWillFixItTimer;
uint8 SnoboldCount;
uint8 MistressOfPainCount;
bool TributeToImmortalityEligible;
};
InstanceScript* GetInstanceScript(InstanceMap* map) const override
{
return new instance_trial_of_the_crusader_InstanceMapScript(map);
}
};
void AddSC_instance_trial_of_the_crusader()
{
new instance_trial_of_the_crusader();
}
| gpl-2.0 |
Asus-T100/kernel | arch/x86/kernel/apic/apic.c | 12 | 66300 | /*
* Local APIC handling, local APIC timers
*
* (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
*
* Fixes
* Maciej W. Rozycki : Bits for genuine 82489DX APICs;
* thanks to Eric Gilmore
* and Rolf G. Tews
* for testing these extensively.
* Maciej W. Rozycki : Various updates and fixes.
* Mikael Pettersson : Power Management for UP-APIC.
* Pavel Machek and
* Mikael Pettersson : PM converted to driver model.
*/
#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/i8253.h>
#include <linux/dmar.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/trace/irq_vectors.h>
#include <asm/irq_remapping.h>
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/idle.h>
#include <asm/mtrr.h>
#include <asm/time.h>
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/tsc.h>
#include <asm/hypervisor.h>
unsigned int num_processors;
unsigned disabled_cpus;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
u8 boot_cpu_apic_version;
/*
* The highest APIC ID seen during enumeration.
*/
static unsigned int max_physical_apicid;
/*
* Bitmask of physically existing CPUs:
*/
physid_mask_t phys_cpu_present_map;
/*
* Processor to be disabled specified by kernel parameter
* disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
* avoid undefined behaviour caused by sending INIT from AP to BSP.
*/
static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
/*
* This variable controls which CPUs receive external NMIs. By default,
* external NMIs are delivered only to the BSP.
*/
static int apic_extnmi = APIC_EXTNMI_BSP;
/*
* Map cpu index to physical APIC ID
*/
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
#ifdef CONFIG_X86_32
/*
* On x86_32, the mapping between cpu and logical apicid may vary
* depending on apic in use. The following early percpu variable is
* used for the mapping. This is where the behaviors of x86_64 and 32
* actually diverge. Let's keep it ugly for now.
*/
DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase;
/*
* Handle interrupt mode configuration register (IMCR).
* This register controls whether the interrupt signals
* that reach the BSP come from the master PIC or from the
* local APIC. Before entering Symmetric I/O Mode, either
* the BIOS or the operating system must switch out of
* PIC Mode by changing the IMCR.
*/
static inline void imcr_pic_to_apic(void)
{
/* select IMCR register */
outb(0x70, 0x22);
/* NMI and 8259 INTR go through APIC */
outb(0x01, 0x23);
}
static inline void imcr_apic_to_pic(void)
{
/* select IMCR register */
outb(0x70, 0x22);
/* NMI and 8259 INTR go directly to BSP */
outb(0x00, 0x23);
}
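/*
 * These helpers only matter on systems that boot in PIC Mode (pic_mode,
 * read from the MP table); callers typically switch the IMCR to APIC
 * routing when the BSP's local APIC is connected and back to PIC routing
 * when it is disconnected.
 */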
#endif
/*
* Knob to control our willingness to enable the local APIC.
*
* +1=force-enable
*/
static int force_enable_local_apic __initdata;
/*
* APIC command line parameters
*/
static int __init parse_lapic(char *arg)
{
if (IS_ENABLED(CONFIG_X86_32) && !arg)
force_enable_local_apic = 1;
else if (arg && !strncmp(arg, "notscdeadline", 13))
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return 0;
}
early_param("lapic", parse_lapic);
#ifdef CONFIG_X86_64
static int apic_calibrate_pmtmr __initdata;
static __init int setup_apicpmtimer(char *s)
{
apic_calibrate_pmtmr = 1;
notsc_setup(NULL);
return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
#endif
unsigned long mp_lapic_addr;
int disable_apic;
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
static int disable_apic_timer __initdata;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
int first_system_vector = FIRST_SYSTEM_VECTOR;
/*
* Debug level, exported for io_apic.c
*/
unsigned int apic_verbosity;
int pic_mode;
/* Have we found an MP table */
int smp_found_config;
static struct resource lapic_resource = {
.name = "Local APIC",
.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};
unsigned int lapic_timer_frequency = 0;
static void apic_pm_activate(void);
static unsigned long apic_phys;
/*
* Get the LAPIC version
*/
static inline int lapic_get_version(void)
{
return GET_APIC_VERSION(apic_read(APIC_LVR));
}
/*
* Check whether the APIC is integrated or a separate chip
*/
static inline int lapic_is_integrated(void)
{
#ifdef CONFIG_X86_64
return 1;
#else
return APIC_INTEGRATED(lapic_get_version());
#endif
}
/*
* Check whether this is a modern or a first-generation APIC
*/
static int modern_apic(void)
{
/* AMD systems use old APIC versions, so check the CPU */
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 >= 0xf)
return 1;
return lapic_get_version() >= 0x14;
}
/*
* right after this call the apic becomes NOOP driven,
* so apic->write()/read() don't do anything
*/
static void __init apic_disable(void)
{
pr_info("APIC: switched to apic NOOP\n");
apic = &apic_noop;
}
void native_apic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
u32 native_safe_apic_wait_icr_idle(void)
{
u32 send_status;
int timeout;
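/*
 * Note: each retry below waits 100us and up to 1000 retries are
 * allowed, so in the worst case this polls for roughly 100 ms
 * before giving up and returning a non-zero send_status.
 */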
timeout = 0;
do {
send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
if (!send_status)
break;
inc_irq_stat(icr_read_retry_count);
udelay(100);
} while (timeout++ < 1000);
return send_status;
}
void native_apic_icr_write(u32 low, u32 id)
{
unsigned long flags;
local_irq_save(flags);
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
apic_write(APIC_ICR, low);
local_irq_restore(flags);
}
u64 native_apic_icr_read(void)
{
u32 icr1, icr2;
icr2 = apic_read(APIC_ICR2);
icr1 = apic_read(APIC_ICR);
return icr1 | ((u64)icr2 << 32);
}
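/*
 * Illustrative note (not from the original source): in xAPIC mode the
 * 64-bit ICR is split across two 32-bit MMIO registers, with the
 * destination APIC ID in bits 24-31 of APIC_ICR2 (bits 56-63 of the
 * combined value). A hypothetical INIT IPI to APIC ID 3 would thus be:
 */
#if 0 /* example only, not compiled */
native_apic_icr_write(APIC_DM_INIT, 3); /* ICR2 = 3 << 24, then ICR low word */
#endif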
#ifdef CONFIG_X86_32
/**
* get_physical_broadcast - Get the physical broadcast APIC ID
*/
int get_physical_broadcast(void)
{
return modern_apic() ? 0xff : 0xf;
}
#endif
/**
* lapic_get_maxlvt - get the maximum number of local vector table entries
*/
int lapic_get_maxlvt(void)
{
unsigned int v;
v = apic_read(APIC_LVR);
/*
* - we always have an integrated APIC in 64-bit mode
* - 82489DXs do not report the number of LVT entries
*/
return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
}
/*
* Local APIC timer
*/
/* Clock divisor */
#define APIC_DIVISOR 16
#define TSC_DIVISOR 8
/*
* This function sets up the local APIC timer, with a timeout of
* 'clocks' APIC bus clocks. During calibration we actually call
* this function twice on the boot CPU, once with a bogus timeout
* value, second time for real. The other (noncalibrating) CPUs
* call this function only once, with the real, calibrated value.
*
* We do reads before writes even if unnecessary, to get around the
* P5 APIC double write bug.
*/
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
unsigned int lvtt_value, tmp_value;
lvtt_value = LOCAL_TIMER_VECTOR;
if (!oneshot)
lvtt_value |= APIC_LVT_TIMER_PERIODIC;
else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;
if (!lapic_is_integrated())
lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
if (!irqen)
lvtt_value |= APIC_LVT_MASKED;
apic_write(APIC_LVTT, lvtt_value);
if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
/*
* See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
* writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
* According to Intel, MFENCE can do the serialization here.
*/
asm volatile("mfence" : : : "memory");
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
/*
* Divide PICLK by 16
*/
tmp_value = apic_read(APIC_TDCR);
apic_write(APIC_TDCR,
(tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
APIC_TDR_DIV_16);
if (!oneshot)
apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}
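/*
 * Worked example with made-up numbers: the 'clocks' argument is in bus
 * clock ticks. With a hypothetical 800 MHz bus, APIC_TDCR divides by
 * APIC_DIVISOR (16), so APIC_TMICT is loaded with clocks/16 and
 * decrements at 800/16 = 50 MHz; the two factors of 16 cancel:
 *
 *   period = (clocks / 16) / (bus_hz / 16) = clocks / bus_hz
 *
 * e.g. clocks = 80,000,000 gives one interrupt every 100 ms.
 */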
/*
* Setup extended LVT, AMD specific
*
* Software should use the LVT offsets the BIOS provides. The offsets
* are determined by the subsystems using them, such as those for MCE
* threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
* are supported. Beginning with family 10h at least 4 offsets are
* available.
*
* Since the offsets must be consistent for all cores, we keep track
* of the LVT offsets in software and reserve the offset for the same
* vector also to be used on other cores. An offset is freed by
* setting the entry to APIC_EILVT_MASKED.
*
* If the BIOS is right, there should be no conflicts. Otherwise a
* "[Firmware Bug]: ..." error message is generated. However, if
* software does not properly determine the offsets, it is not
* necessarily a BIOS bug.
*/
static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
{
return (old & APIC_EILVT_MASKED)
|| (new == APIC_EILVT_MASKED)
|| ((new & ~APIC_EILVT_MASKED) == old);
}
static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
unsigned int rsvd, vector;
if (offset >= APIC_EILVT_NR_MAX)
return ~0;
rsvd = atomic_read(&eilvt_offsets[offset]);
do {
vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */
if (vector && !eilvt_entry_is_changeable(vector, new))
/* may not change if vectors are different */
return rsvd;
rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
} while (rsvd != new);
rsvd &= ~APIC_EILVT_MASKED;
if (rsvd && rsvd != vector)
pr_info("LVT offset %d assigned for vector 0x%02x\n",
offset, rsvd);
return new;
}
/*
* If mask=1, the LVT entry does not generate interrupts while mask=0
* enables the vector. See also the BKDGs. Must be called with
* preemption disabled.
*/
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
{
unsigned long reg = APIC_EILVTn(offset);
unsigned int new, old, reserved;
new = (mask << 16) | (msg_type << 8) | vector;
old = apic_read(reg);
reserved = reserve_eilvt_offset(offset, new);
if (reserved != new) {
pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
"vector 0x%x, but the register is already in use for "
"vector 0x%x on another cpu\n",
smp_processor_id(), reg, offset, new, reserved);
return -EINVAL;
}
if (!eilvt_entry_is_changeable(old, new)) {
pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
"vector 0x%x, but the register is already in use for "
"vector 0x%x on this cpu\n",
smp_processor_id(), reg, offset, new, old);
return -EBUSY;
}
apic_write(reg, new);
return 0;
}
EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
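/*
 * Usage sketch (illustrative only, not part of this file): an AMD
 * subsystem such as IBS would reserve its BIOS-provided LVT offset
 * like this, with preemption disabled; the offset and vector below
 * are made-up values.
 */
#if 0 /* example only, not compiled */
static void example_setup_ibs_eilvt(void)
{
/* offset 1, vector 0xf8, fixed delivery mode, unmasked */
if (setup_APIC_eilvt(1, 0xf8, APIC_EILVT_MSG_FIX, 0))
pr_err("example: EILVT offset 1 already in use\n");
}
#endif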
/*
* Program the next event, relative to now
*/
static int lapic_next_event(unsigned long delta,
struct clock_event_device *evt)
{
apic_write(APIC_TMICT, delta);
return 0;
}
static int lapic_next_deadline(unsigned long delta,
struct clock_event_device *evt)
{
u64 tsc;
tsc = rdtsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
return 0;
}
static int lapic_timer_shutdown(struct clock_event_device *evt)
{
unsigned int v;
/* Lapic used as dummy for broadcast ? */
if (evt->features & CLOCK_EVT_FEAT_DUMMY)
return 0;
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
apic_write(APIC_TMICT, 0);
return 0;
}
static inline int
lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
{
/* Lapic used as dummy for broadcast ? */
if (evt->features & CLOCK_EVT_FEAT_DUMMY)
return 0;
__setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1);
return 0;
}
static int lapic_timer_set_periodic(struct clock_event_device *evt)
{
return lapic_timer_set_periodic_oneshot(evt, false);
}
static int lapic_timer_set_oneshot(struct clock_event_device *evt)
{
return lapic_timer_set_periodic_oneshot(evt, true);
}
/*
* Local APIC timer broadcast function
*/
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
/*
* The local apic timer can be used for any function which is CPU local.
*/
static struct clock_event_device lapic_clockevent = {
.name = "lapic",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
| CLOCK_EVT_FEAT_DUMMY,
.shift = 32,
.set_state_shutdown = lapic_timer_shutdown,
.set_state_periodic = lapic_timer_set_periodic,
.set_state_oneshot = lapic_timer_set_oneshot,
.set_next_event = lapic_next_event,
.broadcast = lapic_timer_broadcast,
.rating = 100,
.irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
/*
* Setup the local APIC timer for this CPU. Copy the initialized values
* of the boot CPU and register the clock event in the framework.
*/
static void setup_APIC_timer(void)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
if (this_cpu_has(X86_FEATURE_ARAT)) {
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
/* Make LAPIC timer preferable over percpu HPET */
lapic_clockevent.rating = 150;
}
memcpy(levt, &lapic_clockevent, sizeof(*levt));
levt->cpumask = cpumask_of(smp_processor_id());
if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY);
levt->set_next_event = lapic_next_deadline;
clockevents_config_and_register(levt,
tsc_khz * (1000 / TSC_DIVISOR),
0xF, ~0UL);
} else
clockevents_register_device(levt);
}
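/*
 * Worked example with assumed numbers: with tsc_khz = 2,000,000 (a
 * 2 GHz TSC) the deadline clockevent above is registered at
 * tsc_khz * (1000 / TSC_DIVISOR) = 250 MHz. A delta of 250,000 then
 * means 1 ms, and lapic_next_deadline() scales it back by TSC_DIVISOR
 * to 2,000,000 real TSC cycles before writing MSR_IA32_TSC_DEADLINE.
 */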
/*
* Install the updated TSC frequency from recalibration at the TSC
* deadline clockevent devices.
*/
static void __lapic_update_tsc_freq(void *info)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
return;
clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
}
void lapic_update_tsc_freq(void)
{
/*
* The clockevent device's ->mult and ->shift can both be
* changed. In order to avoid races, schedule the frequency
* update code on each CPU.
*/
on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
}
/*
* In this function we calibrate the APIC bus clocks to the external timer.
*
* We want to do the calibration only once, since we want the local timer
* irqs to be in sync. CPUs connected to the same APIC bus have the very same bus
* frequency.
*
* This was previously done by reading the PIT/HPET and waiting for a wrap
* around to find out that a tick has elapsed. I have a box where the PIT
* readout is broken, so it never gets out of the wait loop again. This was
* also reported by others.
*
* Monitoring the jiffies value is inaccurate and the clockevents
* infrastructure allows us to do a simple substitution of the interrupt
* handler.
*
* The calibration routine also uses the pm_timer when possible, as the PIT
* happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
* back to normal later in the boot process).
*/
#define LAPIC_CAL_LOOPS (HZ/10)
static __initdata int lapic_cal_loops = -1;
static __initdata long lapic_cal_t1, lapic_cal_t2;
static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
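/*
 * Note: LAPIC_CAL_LOOPS = HZ/10 ticks of the global clock event is
 * exactly 100 ms regardless of HZ; e.g. with HZ = 250 the handler
 * below samples t1 on tick 0 and t2 on tick 25.
 */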
/*
* Temporary interrupt handler.
*/
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
unsigned long long tsc = 0;
long tapic = apic_read(APIC_TMCCT);
unsigned long pm = acpi_pm_read_early();
if (boot_cpu_has(X86_FEATURE_TSC))
tsc = rdtsc();
switch (lapic_cal_loops++) {
case 0:
lapic_cal_t1 = tapic;
lapic_cal_tsc1 = tsc;
lapic_cal_pm1 = pm;
lapic_cal_j1 = jiffies;
break;
case LAPIC_CAL_LOOPS:
lapic_cal_t2 = tapic;
lapic_cal_tsc2 = tsc;
if (pm < lapic_cal_pm1)
pm += ACPI_PM_OVRRUN;
lapic_cal_pm2 = pm;
lapic_cal_j2 = jiffies;
break;
}
}
static int __init
calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
{
const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
const long pm_thresh = pm_100ms / 100;
unsigned long mult;
u64 res;
#ifndef CONFIG_X86_PM_TIMER
return -1;
#endif
apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
/* Check if the PM timer is available */
if (!deltapm)
return -1;
mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
if (deltapm > (pm_100ms - pm_thresh) &&
deltapm < (pm_100ms + pm_thresh)) {
apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
return 0;
}
res = (((u64)deltapm) * mult) >> 22;
do_div(res, 1000000);
pr_warning("APIC calibration not consistent "
"with PM-Timer: %ldms instead of 100ms\n",(long)res);
/* Correct the lapic counter value */
res = (((u64)(*delta)) * pm_100ms);
do_div(res, deltapm);
pr_info("APIC delta adjusted to PM-Timer: "
"%lu (%ld)\n", (unsigned long)res, *delta);
*delta = (long)res;
/* Correct the tsc counter value */
if (boot_cpu_has(X86_FEATURE_TSC)) {
res = (((u64)(*deltatsc)) * pm_100ms);
do_div(res, deltapm);
apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
"PM-Timer: %lu (%ld)\n",
(unsigned long)res, *deltatsc);
*deltatsc = (long)res;
}
return 0;
}
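/*
 * Worked example with made-up numbers: PMTMR_TICKS_PER_SEC is 3579545,
 * so pm_100ms ~= 357954 ticks. If the calibration measured
 * deltapm = 330000 (~92 ms, outside the 1% threshold) and
 * *delta = 100000 lapic ticks, the correction computes
 *
 *   *delta = 100000 * 357954 / 330000 ~= 108471
 *
 * i.e. the lapic tick count that corresponds to a true 100 ms.
 */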
static int __init calibrate_APIC_clock(void)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
void (*real_handler)(struct clock_event_device *dev);
unsigned long deltaj;
long delta, deltatsc;
int pm_referenced = 0;
/*
* check if the lapic timer has already been calibrated by a platform
* specific routine, such as the tsc calibration code. if so, we just fill
* in the clockevent structure and return.
*/
if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
return 0;
} else if (lapic_timer_frequency) {
apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
lapic_timer_frequency);
lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
TICK_NSEC, lapic_clockevent.shift);
lapic_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &lapic_clockevent);
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
return 0;
}
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
"calibrating APIC timer ...\n");
local_irq_disable();
/* Replace the global interrupt handler */
real_handler = global_clock_event->event_handler;
global_clock_event->event_handler = lapic_cal_handler;
/*
* Setup the APIC counter to maximum. There is no way the lapic
* can underflow in the 100ms detection time frame
*/
__setup_APIC_LVTT(0xffffffff, 0, 0);
/* Let the interrupts run */
local_irq_enable();
while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
cpu_relax();
local_irq_disable();
/* Restore the real event handler */
global_clock_event->event_handler = real_handler;
/* Build delta t1-t2 as apic timer counts down */
delta = lapic_cal_t1 - lapic_cal_t2;
apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
/* we trust the PM based calibration if possible */
pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
&delta, &deltatsc);
/* Calculate the scaled math multiplication factor */
lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
lapic_clockevent.shift);
lapic_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &lapic_clockevent);
lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
lapic_timer_frequency);
if (boot_cpu_has(X86_FEATURE_TSC)) {
apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
"%ld.%04ld MHz.\n",
(deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
(deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
}
apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
"%u.%04u MHz.\n",
lapic_timer_frequency / (1000000 / HZ),
lapic_timer_frequency % (1000000 / HZ));
/*
* Do a sanity check on the APIC calibration result
*/
if (lapic_timer_frequency < (1000000 / HZ)) {
local_irq_enable();
pr_warning("APIC frequency too slow, disabling apic timer\n");
return -1;
}
levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
/*
* PM timer calibration failed or was not turned on,
* so let's try APIC timer based calibration
*/
if (!pm_referenced) {
apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
/*
* Setup the apic timer manually
*/
levt->event_handler = lapic_cal_handler;
lapic_timer_set_periodic(levt);
lapic_cal_loops = -1;
/* Let the interrupts run */
local_irq_enable();
while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
cpu_relax();
/* Stop the lapic timer */
local_irq_disable();
lapic_timer_shutdown(levt);
/* Jiffies delta */
deltaj = lapic_cal_j2 - lapic_cal_j1;
apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
/* Check if the jiffies result is consistent */
if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
else
levt->features |= CLOCK_EVT_FEAT_DUMMY;
}
local_irq_enable();
if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
pr_warning("APIC timer disabled due to verification failure\n");
return -1;
}
return 0;
}
/*
* Setup the boot APIC
*
* Calibrate and verify the result.
*/
void __init setup_boot_APIC_clock(void)
{
/*
* The local apic timer can be disabled via the kernel
* commandline or from the CPU detection code. Register the lapic
* timer as a dummy clock event source on SMP systems, so the
* broadcast mechanism is used. On UP systems simply ignore it.
*/
if (disable_apic_timer) {
pr_info("Disabling APIC timer\n");
/* No broadcast on UP ! */
if (num_possible_cpus() > 1) {
lapic_clockevent.mult = 1;
setup_APIC_timer();
}
return;
}
if (calibrate_APIC_clock()) {
/* No broadcast on UP ! */
if (num_possible_cpus() > 1)
setup_APIC_timer();
return;
}
/*
* If nmi_watchdog is set to IO_APIC, we need the
* PIT/HPET going. Otherwise register lapic as a dummy
* device.
*/
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
/* Setup the lapic or request the broadcast */
setup_APIC_timer();
}
void setup_secondary_APIC_clock(void)
{
setup_APIC_timer();
}
/*
* The guts of the apic timer interrupt
*/
static void local_apic_timer_interrupt(void)
{
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
/*
* Normally we should not be here until the LAPIC has been initialized,
* but in some cases, like kdump, it's possible that a pending LAPIC
* timer interrupt from the previous kernel's context is delivered in
* the new kernel the moment interrupts are enabled.
*
* Interrupts are enabled early and the LAPIC is set up much later, hence
* it's possible that when we get here evt->event_handler is NULL.
* Check for event_handler being NULL and discard the interrupt as
* spurious.
*/
if (!evt->event_handler) {
pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
/* Switch it off */
lapic_timer_shutdown(evt);
return;
}
/*
* the NMI deadlock-detector uses this.
*/
inc_irq_stat(apic_timer_irqs);
evt->event_handler(evt);
}
/*
* Local APIC timer interrupt. This is the most natural way for doing
* local interrupts, but local timer interrupts can be emulated by
* broadcast interrupts too. [in case the hw doesn't support APIC timers]
*
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
/*
* NOTE! We'd better ACK the irq immediately,
* because timer handling can be slow.
*
* update_process_times() expects us to have done irq_enter().
* Besides, if we don't ACK it, timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
entering_ack_irq();
local_apic_timer_interrupt();
exiting_irq();
set_irq_regs(old_regs);
}
__visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
/*
* NOTE! We'd better ACK the irq immediately,
* because timer handling can be slow.
*
* update_process_times() expects us to have done irq_enter().
* Besides, if we don't ACK it, timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
entering_ack_irq();
trace_local_timer_entry(LOCAL_TIMER_VECTOR);
local_apic_timer_interrupt();
trace_local_timer_exit(LOCAL_TIMER_VECTOR);
exiting_irq();
set_irq_regs(old_regs);
}
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
/*
* Local APIC start and shutdown
*/
/**
* clear_local_APIC - shutdown the local APIC
*
* This is called when a CPU is disabled and before rebooting, so the state of
* the local APIC has no dangling leftovers. Also used to clean out any BIOS
* leftovers during boot.
*/
void clear_local_APIC(void)
{
int maxlvt;
u32 v;
/* APIC hasn't been mapped yet */
if (!x2apic_mode && !apic_phys)
return;
maxlvt = lapic_get_maxlvt();
/*
* Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this.
*/
if (maxlvt >= 3) {
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
}
/*
* Careful: we have to set masks only first to deassert
* any level-triggered sources.
*/
v = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT0);
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
}
/* let's not touch this if we didn't frob it */
#ifdef CONFIG_X86_THERMAL_VECTOR
if (maxlvt >= 5) {
v = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
}
#endif
#ifdef CONFIG_X86_MCE_INTEL
if (maxlvt >= 6) {
v = apic_read(APIC_LVTCMCI);
if (!(v & APIC_LVT_MASKED))
apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
}
#endif
/*
* Clean APIC state for other OSs:
*/
apic_write(APIC_LVTT, APIC_LVT_MASKED);
apic_write(APIC_LVT0, APIC_LVT_MASKED);
apic_write(APIC_LVT1, APIC_LVT_MASKED);
if (maxlvt >= 3)
apic_write(APIC_LVTERR, APIC_LVT_MASKED);
if (maxlvt >= 4)
apic_write(APIC_LVTPC, APIC_LVT_MASKED);
/* Integrated APIC (!82489DX) ? */
if (lapic_is_integrated()) {
if (maxlvt > 3)
/* Clear ESR due to Pentium errata 3AP and 11AP */
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
}
}
/**
* disable_local_APIC - clear and disable the local APIC
*/
void disable_local_APIC(void)
{
unsigned int value;
/* APIC hasn't been mapped yet */
if (!x2apic_mode && !apic_phys)
return;
clear_local_APIC();
/*
* Disable APIC (implies clearing of registers
* for 82489DX!).
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_SPIV_APIC_ENABLED;
apic_write(APIC_SPIV, value);
#ifdef CONFIG_X86_32
/*
* When LAPIC was disabled by the BIOS and enabled by the kernel,
* restore the disabled state.
*/
if (enabled_via_apicbase) {
unsigned int l, h;
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_ENABLE;
wrmsr(MSR_IA32_APICBASE, l, h);
}
#endif
}
/*
* If Linux enabled the LAPIC against the BIOS default, shut it down before
* re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
* not power-off. Additionally clear all LVT entries before disable_local_APIC
* for the case where Linux didn't enable the LAPIC.
*/
void lapic_shutdown(void)
{
unsigned long flags;
if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
return;
local_irq_save(flags);
#ifdef CONFIG_X86_32
if (!enabled_via_apicbase)
clear_local_APIC();
else
#endif
disable_local_APIC();
local_irq_restore(flags);
}
/**
* sync_Arb_IDs - synchronize APIC bus arbitration IDs
*/
void __init sync_Arb_IDs(void)
{
/*
* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. Not
* needed on AMD.
*/
if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
return;
/*
* Wait for idle.
*/
apic_wait_icr_idle();
apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
apic_write(APIC_ICR, APIC_DEST_ALLINC |
APIC_INT_LEVELTRIG | APIC_DM_INIT);
}
/*
* An initial setup of the virtual wire mode.
*/
void __init init_bsp_APIC(void)
{
unsigned int value;
/*
* Don't do the setup now if we have an SMP BIOS as the
* through-I/O-APIC virtual wire mode might be active.
*/
if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
return;
/*
* Do not trust the local APIC being empty at bootup.
*/
clear_local_APIC();
/*
* Enable APIC.
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
value |= APIC_SPIV_APIC_ENABLED;
#ifdef CONFIG_X86_32
/* This bit is reserved on P4/Xeon and should be cleared */
if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
(boot_cpu_data.x86 == 15))
value &= ~APIC_SPIV_FOCUS_DISABLED;
else
#endif
value |= APIC_SPIV_FOCUS_DISABLED;
value |= SPURIOUS_APIC_VECTOR;
apic_write(APIC_SPIV, value);
/*
* Set up the virtual wire mode.
*/
apic_write(APIC_LVT0, APIC_DM_EXTINT);
value = APIC_DM_NMI;
if (!lapic_is_integrated()) /* 82489DX */
value |= APIC_LVT_LEVEL_TRIGGER;
if (apic_extnmi == APIC_EXTNMI_NONE)
value |= APIC_LVT_MASKED;
apic_write(APIC_LVT1, value);
}
static void lapic_setup_esr(void)
{
unsigned int oldvalue, value, maxlvt;
if (!lapic_is_integrated()) {
pr_info("No ESR for 82489DX.\n");
return;
}
if (apic->disable_esr) {
/*
* Something untraceable is creating bad interrupts on
* secondary quads ... for the moment, just leave the
* ESR disabled - we can't do anything useful with the
* errors anyway - mbligh
*/
pr_info("Leaving ESR disabled.\n");
return;
}
maxlvt = lapic_get_maxlvt();
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
oldvalue = apic_read(APIC_ESR);
/* enables sending errors */
value = ERROR_APIC_VECTOR;
apic_write(APIC_LVTERR, value);
/*
* spec says clear errors after enabling vector.
*/
if (maxlvt > 3)
apic_write(APIC_ESR, 0);
value = apic_read(APIC_ESR);
if (value != oldvalue)
apic_printk(APIC_VERBOSE, "ESR value before enabling "
"vector: 0x%08x after: 0x%08x\n",
oldvalue, value);
}
/**
* setup_local_APIC - setup the local APIC
*
* Used to set up the local APIC while initializing the BSP or bringing up APs.
* Always called with preemption disabled.
*/
void setup_local_APIC(void)
{
int cpu = smp_processor_id();
unsigned int value, queued;
int i, j, acked = 0;
unsigned long long tsc = 0, ntsc;
long long max_loops = cpu_khz ? cpu_khz : 1000000;
if (boot_cpu_has(X86_FEATURE_TSC))
tsc = rdtsc();
if (disable_apic) {
disable_ioapic_support();
return;
}
#ifdef CONFIG_X86_32
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (lapic_is_integrated() && apic->disable_esr) {
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
}
#endif
perf_events_lapic_init();
/*
* Double-check whether this APIC is really registered.
* This is meaningless in clustered apic mode, so we skip it.
*/
BUG_ON(!apic->apic_id_registered());
/*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
apic->init_apic_ldr();
#ifdef CONFIG_X86_32
/*
* APIC LDR is initialized. If logical_apicid mapping was
* initialized during get_smp_config(), make sure it matches the
* actual value.
*/
i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
/* always use the value from LDR */
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
logical_smp_processor_id();
#endif
/*
* Set Task Priority to 'accept all'. We never change this
* later on.
*/
value = apic_read(APIC_TASKPRI);
value &= ~APIC_TPRI_MASK;
apic_write(APIC_TASKPRI, value);
/*
* After a crash, we no longer service the interrupts and a pending
* interrupt from the previous kernel might still have its ISR bit set.
*
* Most probably by now the CPU has serviced that pending interrupt and
* it might not have done the ack_APIC_irq() because it thought the
* interrupt came from the i8259 as an ExtInt. The LAPIC did not get an
* EOI so it does not clear the ISR bit and the cpu thinks it has
* already serviced the interrupt. Hence a vector might get locked. It
* was noticed for the timer irq (vector 0x31). Issue an extra EOI to
* clear the ISR.
*/
do {
queued = 0;
for (i = APIC_ISR_NR - 1; i >= 0; i--)
queued |= apic_read(APIC_IRR + i*0x10);
for (i = APIC_ISR_NR - 1; i >= 0; i--) {
value = apic_read(APIC_ISR + i*0x10);
for (j = 31; j >= 0; j--) {
if (value & (1<<j)) {
ack_APIC_irq();
acked++;
}
}
}
if (acked > 256) {
printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
acked);
break;
}
if (queued) {
if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
ntsc = rdtsc();
max_loops = (cpu_khz << 10) - (ntsc - tsc);
} else
max_loops--;
}
} while (queued && max_loops > 0);
WARN_ON(max_loops <= 0);
/*
* Now that we are all set up, enable the APIC
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
/*
* Enable APIC
*/
value |= APIC_SPIV_APIC_ENABLED;
#ifdef CONFIG_X86_32
/*
* Some unknown Intel IO/APIC (or APIC) errata are biting us with
* certain networking cards. If high frequency interrupts are
* happening on a particular IOAPIC pin, plus the IOAPIC routing
* entry is masked/unmasked at a high rate as well then sooner or
* later IOAPIC line gets 'stuck', no more interrupts are received
* from the device. If focus CPU is disabled then the hang goes
* away, oh well :-(
*
* [ This bug can be reproduced easily with level-triggered PCI
* Ne2000 networking cards and PII/PIII processors, dual
* BX chipset. ]
*/
/*
* Actually disabling the focus CPU check just makes the hang less
* frequent as it makes the interrupt distribution model be more
* like LRU than MRU (the short-term load is more even across CPUs).
*/
/*
* - enable focus processor (bit==0)
* - 64-bit mode always uses processor focus,
*   so there is no need to set it
*/
value &= ~APIC_SPIV_FOCUS_DISABLED;
#endif
/*
* Set spurious IRQ vector
*/
value |= SPURIOUS_APIC_VECTOR;
apic_write(APIC_SPIV, value);
/*
* Set up LVT0, LVT1:
*
* set up through-local-APIC on the BP's LINT0. This is not
* strictly necessary in pure symmetric-IO mode, but sometimes
* we delegate interrupts to the 8259A.
*/
/*
* TODO: set up through-local-APIC from through-I/O-APIC? --macro
*/
value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
if (!cpu && (pic_mode || !value)) {
value = APIC_DM_EXTINT;
apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
} else {
value = APIC_DM_EXTINT | APIC_LVT_MASKED;
apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
}
apic_write(APIC_LVT0, value);
/*
* Only the BSP sees the LINT1 NMI signal by default. This can be
* modified by the apic_extnmi= boot option.
*/
if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
apic_extnmi == APIC_EXTNMI_ALL)
value = APIC_DM_NMI;
else
value = APIC_DM_NMI | APIC_LVT_MASKED;
if (!lapic_is_integrated()) /* 82489DX */
value |= APIC_LVT_LEVEL_TRIGGER;
apic_write(APIC_LVT1, value);
#ifdef CONFIG_X86_MCE_INTEL
/* Recheck CMCI information after local APIC is up on CPU #0 */
if (!cpu)
cmci_recheck();
#endif
}
static void end_local_APIC_setup(void)
{
lapic_setup_esr();
#ifdef CONFIG_X86_32
{
unsigned int value;
/* Disable the local apic timer */
value = apic_read(APIC_LVTT);
value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, value);
}
#endif
apic_pm_activate();
}
/*
* APIC setup function for application processors. Called from smpboot.c
*/
void apic_ap_setup(void)
{
setup_local_APIC();
end_local_APIC_setup();
}
#ifdef CONFIG_X86_X2APIC
int x2apic_mode;
enum {
X2APIC_OFF,
X2APIC_ON,
X2APIC_DISABLED,
};
static int x2apic_state;
static void __x2apic_disable(void)
{
u64 msr;
if (!boot_cpu_has(X86_FEATURE_APIC))
return;
rdmsrl(MSR_IA32_APICBASE, msr);
if (!(msr & X2APIC_ENABLE))
return;
/* Disable xapic and x2apic first and then reenable xapic mode */
wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
printk_once(KERN_INFO "x2apic disabled\n");
}
static void __x2apic_enable(void)
{
u64 msr;
rdmsrl(MSR_IA32_APICBASE, msr);
if (msr & X2APIC_ENABLE)
return;
wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
printk_once(KERN_INFO "x2apic enabled\n");
}
static int __init setup_nox2apic(char *str)
{
if (x2apic_enabled()) {
int apicid = native_apic_msr_read(APIC_ID);
if (apicid >= 255) {
pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
apicid);
return 0;
}
pr_warning("x2apic already enabled.\n");
__x2apic_disable();
}
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
x2apic_state = X2APIC_DISABLED;
x2apic_mode = 0;
return 0;
}
early_param("nox2apic", setup_nox2apic);
/* Called from cpu_init() to enable x2apic on (secondary) cpus */
void x2apic_setup(void)
{
/*
* If x2apic is not in ON state, disable it if already enabled
* from BIOS.
*/
if (x2apic_state != X2APIC_ON) {
__x2apic_disable();
return;
}
__x2apic_enable();
}
static __init void x2apic_disable(void)
{
u32 x2apic_id, state = x2apic_state;
x2apic_mode = 0;
x2apic_state = X2APIC_DISABLED;
if (state != X2APIC_ON)
return;
x2apic_id = read_apic_id();
if (x2apic_id >= 255)
panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
__x2apic_disable();
register_lapic_address(mp_lapic_addr);
}
static __init void x2apic_enable(void)
{
if (x2apic_state != X2APIC_OFF)
return;
x2apic_mode = 1;
x2apic_state = X2APIC_ON;
__x2apic_enable();
}
static __init void try_to_enable_x2apic(int remap_mode)
{
if (x2apic_state == X2APIC_DISABLED)
return;
if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
/* IR is required if there is an APIC ID > 255, even when
* running under KVM
*/
if (max_physical_apicid > 255 ||
!hypervisor_x2apic_available()) {
pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
x2apic_disable();
return;
}
/*
* without IR all CPUs can be addressed by IOAPIC/MSI
* only in physical mode
*/
x2apic_phys = 1;
}
x2apic_enable();
}
void __init check_x2apic(void)
{
if (x2apic_enabled()) {
pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
x2apic_mode = 1;
x2apic_state = X2APIC_ON;
} else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
x2apic_state = X2APIC_DISABLED;
}
}
#else /* CONFIG_X86_X2APIC */
static int __init validate_x2apic(void)
{
if (!apic_is_x2apic_enabled())
return 0;
/*
* Checkme: Can we simply turn off x2apic here instead of panic?
*/
panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
}
early_initcall(validate_x2apic);
static inline void try_to_enable_x2apic(int remap_mode) { }
static inline void __x2apic_enable(void) { }
#endif /* !CONFIG_X86_X2APIC */
static int __init try_to_enable_IR(void)
{
#ifdef CONFIG_X86_IO_APIC
if (!x2apic_enabled() && skip_ioapic_setup) {
pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
return -1;
}
#endif
return irq_remapping_enable();
}
void __init enable_IR_x2apic(void)
{
unsigned long flags;
int ret, ir_stat;
if (skip_ioapic_setup)
return;
ir_stat = irq_remapping_prepare();
if (ir_stat < 0 && !x2apic_supported())
return;
ret = save_ioapic_entries();
if (ret) {
pr_info("Saving IO-APIC state failed: %d\n", ret);
return;
}
local_irq_save(flags);
legacy_pic->mask_all();
mask_ioapic_entries();
/* If irq_remapping_prepare() succeeded, try to enable it */
if (ir_stat >= 0)
ir_stat = try_to_enable_IR();
/* ir_stat contains the remap mode or an error code */
try_to_enable_x2apic(ir_stat);
if (ir_stat < 0)
restore_ioapic_entries();
legacy_pic->restore_mask();
local_irq_restore(flags);
}
#ifdef CONFIG_X86_64
/*
* Detect and enable local APICs on non-SMP boards.
* Original code written by Keir Fraser.
* On AMD64 we trust the BIOS - if it says no APIC it is likely
* not correctly set up (usually the APIC timer won't work etc.)
*/
static int __init detect_init_APIC(void)
{
if (!boot_cpu_has(X86_FEATURE_APIC)) {
pr_info("No local APIC present\n");
return -1;
}
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
return 0;
}
#else
static int __init apic_verify(void)
{
u32 features, h, l;
/*
* The APIC feature bit should now be enabled
* in `cpuid'
*/
features = cpuid_edx(1);
if (!(features & (1 << X86_FEATURE_APIC))) {
pr_warning("Could not enable APIC!\n");
return -1;
}
set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
/* The BIOS may have set up the APIC at some other address */
if (boot_cpu_data.x86 >= 6) {
rdmsr(MSR_IA32_APICBASE, l, h);
if (l & MSR_IA32_APICBASE_ENABLE)
mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
}
pr_info("Found and enabled local APIC!\n");
return 0;
}
int __init apic_force_enable(unsigned long addr)
{
u32 h, l;
if (disable_apic)
return -1;
/*
* Some BIOSes disable the local APIC in the APIC_BASE
* MSR. This can only be done in software for Intel P6 or later
* and AMD K7 (Model > 1) or later.
*/
if (boot_cpu_data.x86 >= 6) {
rdmsr(MSR_IA32_APICBASE, l, h);
if (!(l & MSR_IA32_APICBASE_ENABLE)) {
pr_info("Local APIC disabled by BIOS -- reenabling.\n");
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | addr;
wrmsr(MSR_IA32_APICBASE, l, h);
enabled_via_apicbase = 1;
}
}
return apic_verify();
}
/*
* Detect and initialize APIC
*/
static int __init detect_init_APIC(void)
{
/* Disabled by kernel option? */
if (disable_apic)
return -1;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
(boot_cpu_data.x86 >= 15))
break;
goto no_apic;
case X86_VENDOR_INTEL:
if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
(boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
break;
goto no_apic;
default:
goto no_apic;
}
if (!boot_cpu_has(X86_FEATURE_APIC)) {
/*
* Override the BIOS and try to enable the local APIC only if
* "lapic" was specified.
*/
if (!force_enable_local_apic) {
pr_info("Local APIC disabled by BIOS -- "
"you can enable it with \"lapic\"\n");
return -1;
}
if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
return -1;
} else {
if (apic_verify())
return -1;
}
apic_pm_activate();
return 0;
no_apic:
pr_info("No local APIC present or hardware disabled\n");
return -1;
}
#endif
/**
* init_apic_mappings - initialize APIC mappings
*/
void __init init_apic_mappings(void)
{
unsigned int new_apicid;
if (x2apic_mode) {
boot_cpu_physical_apicid = read_apic_id();
return;
}
/* If no local APIC can be found return early */
if (!smp_found_config && detect_init_APIC()) {
/* let's NOP'ify apic operations */
pr_info("APIC: disable apic facility\n");
apic_disable();
} else {
apic_phys = mp_lapic_addr;
/*
* acpi lapic path already maps that address in
* acpi_register_lapic_address()
*/
if (!acpi_lapic && !smp_found_config)
register_lapic_address(apic_phys);
}
/*
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
*/
new_apicid = read_apic_id();
if (boot_cpu_physical_apicid != new_apicid) {
boot_cpu_physical_apicid = new_apicid;
/*
* yeah -- we lie about apic_version
* in case the apic was disabled via a boot option,
* but it's not a problem for an SMP-compiled kernel
* since smp_sanity_check is prepared for such a case
* and disables smp mode
*/
boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
}
}
void __init register_lapic_address(unsigned long address)
{
mp_lapic_addr = address;
if (!x2apic_mode) {
set_fixmap_nocache(FIX_APIC_BASE, address);
apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
APIC_BASE, address);
}
if (boot_cpu_physical_apicid == -1U) {
boot_cpu_physical_apicid = read_apic_id();
boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
}
}
/*
* Local APIC interrupts
*/
/*
* This interrupt should _never_ happen with our APIC/SMP architecture
*/
static void __smp_spurious_interrupt(u8 vector)
{
u32 v;
/*
* Check if this really is a spurious interrupt and ACK it
* if it is a vectored one. Just in case...
* Spurious interrupts should not be ACKed.
*/
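/*
 * The ISR is a 256-bit register exposed as eight 32-bit APIC
 * registers spaced 0x10 apart; (vector & ~0x1f) >> 1 equals
 * (vector / 32) * 0x10 and selects the right bank, while
 * vector & 0x1f is the bit within that bank.
 */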
v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
if (v & (1 << (vector & 0x1f)))
ack_APIC_irq();
inc_irq_stat(irq_spurious_count);
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
"should never happen.\n", vector, smp_processor_id());
}
__visible void smp_spurious_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_spurious_interrupt(~regs->orig_ax);
exiting_irq();
}
__visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
{
u8 vector = ~regs->orig_ax;
entering_irq();
trace_spurious_apic_entry(vector);
__smp_spurious_interrupt(vector);
trace_spurious_apic_exit(vector);
exiting_irq();
}
/*
* This interrupt should never happen with our APIC/SMP architecture
*/
static void __smp_error_interrupt(struct pt_regs *regs)
{
u32 v;
u32 i = 0;
static const char * const error_interrupt_reason[] = {
"Send CS error", /* APIC Error Bit 0 */
"Receive CS error", /* APIC Error Bit 1 */
"Send accept error", /* APIC Error Bit 2 */
"Receive accept error", /* APIC Error Bit 3 */
"Redirectable IPI", /* APIC Error Bit 4 */
"Send illegal vector", /* APIC Error Bit 5 */
"Received illegal vector", /* APIC Error Bit 6 */
"Illegal register address", /* APIC Error Bit 7 */
};
/* First tickle the hardware, only then report what went on. -- REW */
if (lapic_get_maxlvt() > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
v = apic_read(APIC_ESR);
ack_APIC_irq();
atomic_inc(&irq_err_count);
apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
smp_processor_id(), v);
v &= 0xff;
while (v) {
if (v & 0x1)
apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
i++;
v >>= 1;
}
apic_printk(APIC_DEBUG, KERN_CONT "\n");
}
__visible void smp_error_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_error_interrupt(regs);
exiting_irq();
}
__visible void smp_trace_error_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_error_apic_entry(ERROR_APIC_VECTOR);
__smp_error_interrupt(regs);
trace_error_apic_exit(ERROR_APIC_VECTOR);
exiting_irq();
}
/**
* connect_bsp_APIC - attach the APIC to the interrupt system
*/
static void __init connect_bsp_APIC(void)
{
#ifdef CONFIG_X86_32
if (pic_mode) {
/*
* Do not trust the local APIC being empty at bootup.
*/
clear_local_APIC();
/*
* PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
* local APIC to INT and NMI lines.
*/
apic_printk(APIC_VERBOSE, "leaving PIC mode, "
"enabling APIC mode.\n");
imcr_pic_to_apic();
}
#endif
}
/**
* disconnect_bsp_APIC - detach the APIC from the interrupt system
* @virt_wire_setup: indicates whether virtual wire mode is selected
*
* Virtual wire mode is necessary to deliver legacy interrupts even when the
* APIC is disabled.
*/
void disconnect_bsp_APIC(int virt_wire_setup)
{
unsigned int value;
#ifdef CONFIG_X86_32
if (pic_mode) {
/*
* Put the board back into PIC mode (has an effect only on
* certain older boards). Note that APIC interrupts, including
* IPIs, won't work beyond this point! The only exception are
* INIT IPIs.
*/
apic_printk(APIC_VERBOSE, "disabling APIC mode, "
"entering PIC mode.\n");
imcr_apic_to_pic();
return;
}
#endif
/* Go back to Virtual Wire compatibility mode */
/* For the spurious interrupt use vector F, and enable it */
value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
value |= APIC_SPIV_APIC_ENABLED;
value |= 0xf;
apic_write(APIC_SPIV, value);
if (!virt_wire_setup) {
/*
* For LVT0 make it edge triggered, active high,
* external and enabled
*/
value = apic_read(APIC_LVT0);
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
apic_write(APIC_LVT0, value);
} else {
/* Disable LVT0 */
apic_write(APIC_LVT0, APIC_LVT_MASKED);
}
/*
* For LVT1 make it edge triggered, active high,
* nmi and enabled
*/
value = apic_read(APIC_LVT1);
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
apic_write(APIC_LVT1, value);
}
/*
* The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
* contiguously, it equals the currently allocated maximum logical CPU ID plus 1.
* All allocated CPU IDs should be in [0, nr_logical_cpuids), so the maximum of
* nr_logical_cpuids is nr_cpu_ids.
*
* NOTE: Reserve 0 for BSP.
*/
static int nr_logical_cpuids = 1;
/*
* Used to store mapping between logical CPU IDs and APIC IDs.
*/
static int cpuid_to_apicid[] = {
[0 ... NR_CPUS - 1] = -1,
};
/*
* Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
* and cpuid_to_apicid[] synchronized.
*/
static int allocate_logical_cpuid(int apicid)
{
int i;
/*
* cpuid <-> apicid mapping is persistent, so when a cpu is up,
* check if the kernel has allocated a cpuid for it.
*/
for (i = 0; i < nr_logical_cpuids; i++) {
if (cpuid_to_apicid[i] == apicid)
return i;
}
/* Allocate a new cpuid. */
if (nr_logical_cpuids >= nr_cpu_ids) {
WARN_ONCE(1, "Only %d processors supported."
"Processor %d/0x%x and the rest are ignored.\n",
nr_cpu_ids - 1, nr_logical_cpuids, apicid);
return -1;
}
cpuid_to_apicid[nr_logical_cpuids] = apicid;
return nr_logical_cpuids++;
}
int __generic_processor_info(int apicid, int version, bool enabled)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
phys_cpu_present_map);
/*
* boot_cpu_physical_apicid is designed to have the apicid
* returned by read_apic_id(), i.e., the apicid of the
* currently booting-up processor. However, on some platforms,
* it is temporarily modified by the apicid reported as BSP
* through MP table. Concretely:
*
* - arch/x86/kernel/mpparse.c: MP_processor_info()
* - arch/x86/mm/amdtopology.c: amd_numa_init()
*
* This function is executed with the modified
* boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel
* parameter doesn't work to disable APs on kdump 2nd kernel.
*
* Since fixing handling of boot_cpu_physical_apicid requires
* another discussion and tests on each platform, we leave it
* for now and here we use read_apic_id() directly in this
* function, generic_processor_info().
*/
if (disabled_cpu_apicid != BAD_APICID &&
disabled_cpu_apicid != read_apic_id() &&
disabled_cpu_apicid == apicid) {
int thiscpu = num_processors + disabled_cpus;
pr_warning("APIC: Disabling requested cpu."
" Processor %d/0x%x ignored.\n",
thiscpu, apicid);
disabled_cpus++;
return -ENODEV;
}
/*
* If the boot cpu has not been detected yet, then only allow up to
* nr_cpu_ids - 1 processors and keep one slot free for boot cpu
*/
if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
apicid != boot_cpu_physical_apicid) {
int thiscpu = max + disabled_cpus - 1;
pr_warning(
"APIC: NR_CPUS/possible_cpus limit of %i almost"
" reached. Keeping one slot for boot cpu."
" Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
disabled_cpus++;
return -ENODEV;
}
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;
if (enabled) {
pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
"reached. Processor %d/0x%x ignored.\n",
max, thiscpu, apicid);
}
disabled_cpus++;
return -EINVAL;
}
if (apicid == boot_cpu_physical_apicid) {
/*
* x86_bios_cpu_apicid is required to have processors listed
* in the same order as the logical cpu numbers. Hence the first
* entry is the BSP, and so on.
* boot_cpu_init() already holds bit 0 in cpu_present_mask
* for BSP.
*/
cpu = 0;
/* Logical cpuid 0 is reserved for BSP. */
cpuid_to_apicid[0] = apicid;
} else {
cpu = allocate_logical_cpuid(apicid);
if (cpu < 0) {
disabled_cpus++;
return -EINVAL;
}
}
/*
* Validate version
*/
if (version == 0x0) {
pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
cpu, apicid);
version = 0x10;
}
if (version != boot_cpu_apic_version) {
pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
boot_cpu_apic_version, cpu, version);
}
if (apicid > max_physical_apicid)
max_physical_apicid = apicid;
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
#ifdef CONFIG_X86_32
early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);
if (enabled) {
num_processors++;
physid_set(apicid, phys_cpu_present_map);
set_cpu_present(cpu, true);
} else {
disabled_cpus++;
}
return cpu;
}
int generic_processor_info(int apicid, int version)
{
return __generic_processor_info(apicid, version, true);
}
int hard_smp_processor_id(void)
{
return read_apic_id();
}
void default_init_apic_ldr(void)
{
unsigned long val;
apic_write(APIC_DFR, APIC_DFR_VALUE);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
apic_write(APIC_LDR, val);
}
int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask,
unsigned int *apicid)
{
unsigned int cpu;
for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
}
if (likely(cpu < nr_cpu_ids)) {
*apicid = per_cpu(x86_cpu_to_apicid, cpu);
return 0;
}
return -EINVAL;
}
/*
* Override the generic EOI implementation with an optimized version.
* Only called during early boot when only one CPU is active and with
* interrupts disabled, so we know this does not race with actual APIC driver
* use.
*/
void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
{
struct apic **drv;
for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
/* Should happen once for each apic */
WARN_ON((*drv)->eoi_write == eoi_write);
(*drv)->eoi_write = eoi_write;
}
}
static void __init apic_bsp_up_setup(void)
{
#ifdef CONFIG_X86_64
apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
#else
/*
* Hack: In case of kdump, after a crash, the kernel might be booting
* on a cpu with a non-zero lapic id. But boot_cpu_physical_apicid
* might be zero if read from the MP tables. Get it from the LAPIC.
*/
# ifdef CONFIG_CRASH_DUMP
boot_cpu_physical_apicid = read_apic_id();
# endif
#endif
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
}
/**
* apic_bsp_setup - Setup function for local apic and io-apic
* @upmode: Force UP mode (for APIC_init_uniprocessor)
*
* Returns:
* apic_id of BSP APIC
*/
int __init apic_bsp_setup(bool upmode)
{
int id;
connect_bsp_APIC();
if (upmode)
apic_bsp_up_setup();
setup_local_APIC();
if (x2apic_mode)
id = apic_read(APIC_LDR);
else
id = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
enable_IO_APIC();
end_local_APIC_setup();
irq_remap_enable_fault_handling();
setup_IO_APIC();
/* Setup local timer */
x86_init.timers.setup_percpu_clockev();
return id;
}
/*
* This initializes the IO-APIC and APIC hardware if this is
* a UP kernel.
*/
int __init APIC_init_uniprocessor(void)
{
if (disable_apic) {
pr_info("Apic disabled\n");
return -1;
}
#ifdef CONFIG_X86_64
if (!boot_cpu_has(X86_FEATURE_APIC)) {
disable_apic = 1;
pr_info("Apic disabled by BIOS\n");
return -1;
}
#else
if (!smp_found_config && !boot_cpu_has(X86_FEATURE_APIC))
return -1;
/*
* Complain if the BIOS pretends there is one.
*/
if (!boot_cpu_has(X86_FEATURE_APIC) &&
APIC_INTEGRATED(boot_cpu_apic_version)) {
pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
boot_cpu_physical_apicid);
return -1;
}
#endif
if (!smp_found_config)
disable_ioapic_support();
default_setup_apic_routing();
apic_bsp_setup(true);
return 0;
}
#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
APIC_init_uniprocessor();
}
#endif
/*
* Power management
*/
#ifdef CONFIG_PM
static struct {
/*
* 'active' is true if the local APIC was enabled by us and
* not the BIOS; this signifies that we are also responsible
* for disabling it before entering apm/acpi suspend
*/
int active;
/* r/w apic fields */
unsigned int apic_id;
unsigned int apic_taskpri;
unsigned int apic_ldr;
unsigned int apic_dfr;
unsigned int apic_spiv;
unsigned int apic_lvtt;
unsigned int apic_lvtpc;
unsigned int apic_lvt0;
unsigned int apic_lvt1;
unsigned int apic_lvterr;
unsigned int apic_tmict;
unsigned int apic_tdcr;
unsigned int apic_thmr;
unsigned int apic_cmci;
} apic_pm_state;
static int lapic_suspend(void)
{
unsigned long flags;
int maxlvt;
if (!apic_pm_state.active)
return 0;
maxlvt = lapic_get_maxlvt();
apic_pm_state.apic_id = apic_read(APIC_ID);
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
if (maxlvt >= 4)
apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_THERMAL_VECTOR
if (maxlvt >= 5)
apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
#ifdef CONFIG_X86_MCE_INTEL
if (maxlvt >= 6)
apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
#endif
local_irq_save(flags);
disable_local_APIC();
irq_remapping_disable();
local_irq_restore(flags);
return 0;
}
static void lapic_resume(void)
{
unsigned int l, h;
unsigned long flags;
int maxlvt;
if (!apic_pm_state.active)
return;
local_irq_save(flags);
/*
* IO-APIC and PIC have their own resume routines.
* We just mask them here to make sure the interrupt
* subsystem is completely quiet while we enable x2apic
* and interrupt-remapping.
*/
mask_ioapic_entries();
legacy_pic->mask_all();
if (x2apic_mode) {
__x2apic_enable();
} else {
/*
* Make sure the APICBASE points to the right address
*
* FIXME! This will be wrong if we ever support suspend on
* SMP! We'll need to do this as part of the CPU restore!
*/
if (boot_cpu_data.x86 >= 6) {
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
}
}
maxlvt = lapic_get_maxlvt();
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
apic_write(APIC_DFR, apic_pm_state.apic_dfr);
apic_write(APIC_LDR, apic_pm_state.apic_ldr);
apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_THERMAL_VECTOR
if (maxlvt >= 5)
apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
#ifdef CONFIG_X86_MCE_INTEL
if (maxlvt >= 6)
apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
#endif
if (maxlvt >= 4)
apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
irq_remapping_reenable(x2apic_mode);
local_irq_restore(flags);
}
/*
* This device has no shutdown method - fully functioning local APICs
* are needed on every CPU up until machine_halt/restart/poweroff.
*/
static struct syscore_ops lapic_syscore_ops = {
.resume = lapic_resume,
.suspend = lapic_suspend,
};
static void apic_pm_activate(void)
{
apic_pm_state.active = 1;
}
static int __init init_lapic_sysfs(void)
{
/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
if (boot_cpu_has(X86_FEATURE_APIC))
register_syscore_ops(&lapic_syscore_ops);
return 0;
}
/* local apic needs to resume before other devices access its registers. */
core_initcall(init_lapic_sysfs);
#else /* CONFIG_PM */
static void apic_pm_activate(void) { }
#endif /* CONFIG_PM */
#ifdef CONFIG_X86_64
static int multi_checked;
static int multi;
static int set_multi(const struct dmi_system_id *d)
{
if (multi)
return 0;
pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
multi = 1;
return 0;
}
static const struct dmi_system_id multi_dmi_table[] = {
{
.callback = set_multi,
.ident = "IBM System Summit2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
},
},
{}
};
static void dmi_check_multi(void)
{
if (multi_checked)
return;
dmi_check_system(multi_dmi_table);
multi_checked = 1;
}
/*
* apic_is_clustered_box() -- Check if we can expect good TSC
*
* Thus far, the major user of this is IBM's Summit2 series:
* Clustered boxes may have unsynced TSC problems if they are
* multi-chassis.
* Use DMI to check for them.
*/
int apic_is_clustered_box(void)
{
dmi_check_multi();
return multi;
}
#endif
/*
* APIC command line parameters
*/
static int __init setup_disableapic(char *arg)
{
disable_apic = 1;
setup_clear_cpu_cap(X86_FEATURE_APIC);
return 0;
}
early_param("disableapic", setup_disableapic);
/* same as disableapic, for compatibility */
static int __init setup_nolapic(char *arg)
{
return setup_disableapic(arg);
}
early_param("nolapic", setup_nolapic);
static int __init parse_lapic_timer_c2_ok(char *arg)
{
local_apic_timer_c2_ok = 1;
return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
static int __init parse_disable_apic_timer(char *arg)
{
disable_apic_timer = 1;
return 0;
}
early_param("noapictimer", parse_disable_apic_timer);
static int __init parse_nolapic_timer(char *arg)
{
disable_apic_timer = 1;
return 0;
}
early_param("nolapic_timer", parse_nolapic_timer);
static int __init apic_set_verbosity(char *arg)
{
if (!arg) {
#ifdef CONFIG_X86_64
skip_ioapic_setup = 0;
return 0;
#endif
return -EINVAL;
}
if (strcmp("debug", arg) == 0)
apic_verbosity = APIC_DEBUG;
else if (strcmp("verbose", arg) == 0)
apic_verbosity = APIC_VERBOSE;
else {
pr_warning("APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug\n", arg);
return -EINVAL;
}
return 0;
}
early_param("apic", apic_set_verbosity);
static int __init lapic_insert_resource(void)
{
if (!apic_phys)
return -1;
/* Put local APIC into the resource map. */
lapic_resource.start = apic_phys;
lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
insert_resource(&iomem_resource, &lapic_resource);
return 0;
}
/*
* We need to call insert_resource() after e820_reserve_resources(),
* which uses request_resource().
*/
late_initcall(lapic_insert_resource);
static int __init apic_set_disabled_cpu_apicid(char *arg)
{
if (!arg || !get_option(&arg, &disabled_cpu_apicid))
return -EINVAL;
return 0;
}
early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
static int __init apic_set_extnmi(char *arg)
{
if (!arg)
return -EINVAL;
if (!strncmp("all", arg, 3))
apic_extnmi = APIC_EXTNMI_ALL;
else if (!strncmp("none", arg, 4))
apic_extnmi = APIC_EXTNMI_NONE;
else if (!strncmp("bsp", arg, 3))
apic_extnmi = APIC_EXTNMI_BSP;
else {
pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
return -EINVAL;
}
return 0;
}
early_param("apic_extnmi", apic_set_extnmi);
| gpl-2.0 |
Biktorgj/kminilte_kernel | arch/arm/plat-samsung/clock.c | 12 | 13247 | /* linux/arch/arm/plat-s3c24xx/clock.c
*
* Copyright 2004-2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX Core clock control support
*
* Based on, and code from linux/arch/arm/mach-versatile/clock.c
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#if defined(CONFIG_DEBUG_FS)
#include <linux/debugfs.h>
#endif
#include <trace/events/power.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <plat/cpu-freq.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <linux/serial_core.h>
#include <plat/regs-serial.h> /* for s3c24xx_uart_devs */
/* clock information */
static LIST_HEAD(clocks);
/* We originally used a mutex here, but some contexts (see resume)
* call functions such as clk_set_parent() with IRQs disabled,
* causing a BUG to be triggered.
*/
DEFINE_SPINLOCK(clocks_lock);
/* Global watchdog clock used by arch_wtd_reset() callback */
struct clk *s3c2410_wdtclk;
static int __init s3c_wdt_reset_init(void)
{
s3c2410_wdtclk = clk_get(NULL, "watchdog");
if (IS_ERR(s3c2410_wdtclk))
printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);
return 0;
}
arch_initcall(s3c_wdt_reset_init);
/* enable and disable calls for use with the clk struct */
static int clk_null_enable(struct clk *clk, int enable)
{
return 0;
}
static int __clk_enable(struct clk *clk)
{
if (IS_ERR(clk) || clk == NULL)
return -EINVAL;
__clk_enable(clk->parent);
if ((clk->usage++) == 0) {
trace_clock_enable(clk->name, 1, smp_processor_id());
(clk->enable)(clk, 1);
}
return 0;
}
int clk_enable(struct clk *clk)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&clocks_lock, flags);
ret = __clk_enable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
static void __clk_disable(struct clk *clk)
{
if (IS_ERR(clk) || clk == NULL)
return;
if (WARN_ON(!clk->usage)) {
pr_err("%s: clock, %s : %s, already disabled\n", __func__,
clk->devname ? clk->devname : "", clk->name);
return;
}
if ((--clk->usage) == 0) {
trace_clock_disable(clk->name, 0, smp_processor_id());
(clk->enable)(clk, 0);
}
__clk_disable(clk->parent);
return;
}
void clk_disable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
__clk_disable(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
}
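/*
 * Hedged usage sketch (added for illustration, not part of the original
 * driver): how the usage counting above behaves for a consumer.  Only
 * the 0 -> 1 transition invokes the hardware enable callback (and
 * __clk_enable() propagates up the parent chain first); every
 * clk_enable() must be balanced by a clk_disable().
 */
#if 0 /* illustration only; "example" is a hypothetical clock name */
struct clk *clk = clk_get(NULL, "example");
if (!IS_ERR(clk)) {
clk_enable(clk); /* usage 0 -> 1: (clk->enable)(clk, 1) runs */
clk_enable(clk); /* usage 1 -> 2: hardware untouched */
clk_disable(clk); /* usage 2 -> 1: hardware untouched */
clk_disable(clk); /* usage 1 -> 0: (clk->enable)(clk, 0) runs */
clk_put(clk);
}
#endif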
unsigned long clk_get_rate(struct clk *clk)
{
if (IS_ERR(clk))
return 0;
if (clk->ops != NULL && clk->ops->get_rate != NULL)
return (clk->ops->get_rate)(clk);
if (clk->parent != NULL)
return clk_get_rate(clk->parent);
return clk->rate;
}
long clk_round_rate(struct clk *clk, unsigned long rate)
{
if (!IS_ERR(clk) && clk->ops && clk->ops->round_rate)
return (clk->ops->round_rate)(clk, rate);
return rate;
}
int clk_set_rate(struct clk *clk, unsigned long rate)
{
int ret;
unsigned long flags;
if (IS_ERR(clk))
return -EINVAL;
/* We do not just do a default clk->rate = rate here, as
* the clock may have been set this way by choice.
*/
WARN_ON(clk->ops == NULL);
WARN_ON(clk->ops && clk->ops->set_rate == NULL);
if (clk->ops == NULL || clk->ops->set_rate == NULL)
return -EINVAL;
spin_lock_irqsave(&clocks_lock, flags);
trace_clock_set_rate(clk->name, rate, smp_processor_id());
ret = (clk->ops->set_rate)(clk, rate);
spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
struct clk *__clk_get_parent(struct clk *clk)
{
if (clk->ops && clk->ops->get_parent)
return clk->ops->get_parent(clk);
else
return clk->parent;
}
struct clk *clk_get_parent(struct clk *clk)
{
struct clk *ret;
unsigned long flags;
if (IS_ERR(clk))
return ERR_PTR(-EINVAL);
spin_lock_irqsave(&clocks_lock, flags);
ret = __clk_get_parent(clk);
spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
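/*
 * Added note: clk_set_parent() below migrates the enable count.  If the
 * child currently has users, the new parent is enabled once per
 * outstanding reference before the mux is switched, and the old parent
 * is released the same number of times afterwards, so an in-use child
 * never runs from a gated parent.
 */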
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret = 0;
unsigned long flags;
struct clk *old_parent;
int i;
if (IS_ERR(clk))
return -EINVAL;
old_parent = clk->parent;
spin_lock_irqsave(&clocks_lock, flags);
if (clk->usage) {
for (i = 0; i < clk->usage; i++)
__clk_enable(parent);
}
if (clk->ops && clk->ops->set_parent) {
trace_clock_set_parent(clk->name, parent->name);
ret = (clk->ops->set_parent)(clk, parent);
}
if (clk->usage) {
for (i = 0; i < clk->usage; i++)
__clk_disable(old_parent);
}
spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
EXPORT_SYMBOL(clk_enable);
EXPORT_SYMBOL(clk_disable);
EXPORT_SYMBOL(clk_get_rate);
EXPORT_SYMBOL(clk_round_rate);
EXPORT_SYMBOL(clk_set_rate);
EXPORT_SYMBOL(clk_get_parent);
EXPORT_SYMBOL(clk_set_parent);
/* base clocks */
int clk_default_setrate(struct clk *clk, unsigned long rate)
{
clk->rate = rate;
return 0;
}
struct clk_ops clk_ops_def_setrate = {
.set_rate = clk_default_setrate,
};
struct clk clk_xtal = {
.name = "xtal",
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
};
struct clk clk_ext = {
.name = "ext",
};
struct clk clk_epll = {
.name = "epll",
};
struct clk clk_mpll = {
.name = "mpll",
.ops = &clk_ops_def_setrate,
};
struct clk clk_upll = {
.name = "upll",
.parent = NULL,
.ctrlbit = 0,
};
struct clk clk_f = {
.name = "fclk",
.rate = 0,
.parent = &clk_mpll,
.ctrlbit = 0,
};
struct clk clk_h = {
.name = "hclk",
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
.ops = &clk_ops_def_setrate,
};
struct clk clk_p = {
.name = "pclk",
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
.ops = &clk_ops_def_setrate,
};
struct clk clk_usb_bus = {
.name = "usb-bus",
.rate = 0,
.parent = &clk_upll,
};
struct clk s3c24xx_uclk = {
.name = "uclk",
};
/* initialise the clock system */
/**
* s3c24xx_register_clock() - register a clock
* @clk: The clock to register
*
* Add the specified clock to the list of clocks known by the system.
*/
int s3c24xx_register_clock(struct clk *clk)
{
if (clk->enable == NULL)
clk->enable = clk_null_enable;
if (clk->init)
clk->init(clk);
/* add to the list of available clocks */
/* Quick check to see if this clock has already been registered:
* an unlinked (zeroed) list node has list.prev == list.next. */
BUG_ON(clk->list.prev != clk->list.next);
spin_lock(&clocks_lock);
list_add(&clk->list, &clocks);
spin_unlock(&clocks_lock);
/* fill up the clk_lookup structure and register it*/
clk->lookup.dev_id = clk->devname;
clk->lookup.con_id = clk->name;
clk->lookup.clk = clk;
clkdev_add(&clk->lookup);
return 0;
}
/**
* s3c24xx_register_clocks() - register an array of clock pointers
* @clks: Pointer to an array of struct clk pointers
* @nr_clks: The number of clocks in the @clks array.
*
* Call s3c24xx_register_clock() for all the clock pointers contained
* in the @clks list. Returns the number of failures.
*/
int s3c24xx_register_clocks(struct clk **clks, int nr_clks)
{
int fails = 0;
for (; nr_clks > 0; nr_clks--, clks++) {
if (s3c24xx_register_clock(*clks) < 0) {
struct clk *clk = *clks;
printk(KERN_ERR "%s: failed to register %p: %s\n",
__func__, clk, clk->name);
fails++;
}
}
return fails;
}
/**
* s3c_register_clocks() - register an array of clocks
* @clkp: Pointer to the first clock in the array.
* @nr_clks: Number of clocks to register.
*
* Call s3c24xx_register_clock() on the @clkp array given, printing an
* error if it fails to register the clock (unlikely).
*/
void __init s3c_register_clocks(struct clk *clkp, int nr_clks)
{
int ret;
for (; nr_clks > 0; nr_clks--, clkp++) {
ret = s3c24xx_register_clock(clkp);
if (ret < 0) {
printk(KERN_ERR "Failed to register clock %s (%d)\n",
clkp->name, ret);
}
}
}
/**
* s3c_disable_clocks() - disable an array of clocks
* @clkp: Pointer to the first clock in the array.
* @nr_clks: Number of clocks to disable.
*
* For internal use only at initialisation time; disables the clocks in
* the @clkp array.
*/
void __init s3c_disable_clocks(struct clk *clkp, int nr_clks)
{
for (; nr_clks > 0; nr_clks--, clkp++)
(clkp->enable)(clkp, 0);
}
/* initialise all the clocks */
int __init s3c24xx_register_baseclocks(unsigned long xtal)
{
printk(KERN_INFO "S3C24XX Clocks, Copyright 2004 Simtec Electronics\n");
clk_xtal.rate = xtal;
/* register our clocks */
if (s3c24xx_register_clock(&clk_xtal) < 0)
printk(KERN_ERR "failed to register master xtal\n");
if (s3c24xx_register_clock(&clk_mpll) < 0)
printk(KERN_ERR "failed to register mpll clock\n");
if (s3c24xx_register_clock(&clk_upll) < 0)
printk(KERN_ERR "failed to register upll clock\n");
if (s3c24xx_register_clock(&clk_f) < 0)
printk(KERN_ERR "failed to register cpu fclk\n");
if (s3c24xx_register_clock(&clk_h) < 0)
printk(KERN_ERR "failed to register cpu hclk\n");
if (s3c24xx_register_clock(&clk_p) < 0)
printk(KERN_ERR "failed to register cpu pclk\n");
return 0;
}
#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/* debugfs support to trace clock tree hierarchy and attributes */
static struct dentry *clk_debugfs_root;
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
const char *state;
char buf[255] = { 0 };
int n = 0;
if (c->name)
n = snprintf(buf, sizeof(buf) - 1, "%s", c->name);
if (c->devname)
n += snprintf(buf + n, sizeof(buf) - 1 - n, ":%s", c->devname);
state = (c->usage > 0) ? "on" : "off";
seq_printf(s, "%*s%-*s %-6s %-3d %-10lu\n",
level * 3 + 1, "",
50 - level * 3, buf,
state, c->usage, c->usage ? clk_get_rate(c) : 0);
list_for_each_entry(child, &clocks, list) {
if (child->parent != c)
continue;
clock_tree_show_one(s, child, level + 1);
}
}
static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
unsigned long flags;
seq_printf(s, " clock state ref rate\n");
seq_printf(s, "--------------------------------------------------------------------\n");
spin_lock_irqsave(&clocks_lock, flags);
list_for_each_entry(c, &clocks, list)
if (c->parent == NULL)
clock_tree_show_one(s, c, 0);
spin_unlock_irqrestore(&clocks_lock, flags);
return 0;
}
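/*
 * Hypothetical output (clock names from this file, rates invented for
 * illustration), matching the seq_printf() format above:
 *
 *  clock           state  ref  rate
 * --------------------------------------------------------------------
 *  mpll            on     1    800000000
 *     fclk         on     1    800000000
 */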
static int clock_tree_open(struct inode *inode, struct file *file)
{
return single_open(file, clock_tree_show, inode->i_private);
}
static const struct file_operations clock_tree_fops = {
.open = clock_tree_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int clock_rate_show(void *data, u64 *val)
{
struct clk *c = data;
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
*val = c->usage ? clk_get_rate(c) : 0;
spin_unlock_irqrestore(&clocks_lock, flags);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_rate_show, NULL, "%llu\n");
static int clk_debugfs_register_one(struct clk *c)
{
int err;
struct dentry *d;
struct clk *pa = c->parent;
char s[255];
char *p = s;
if (c->name)
p += sprintf(p, "%s", c->name);
if (c->devname)
p += sprintf(p, ":%s", c->devname);
d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
if (!d)
return -ENOMEM;
c->dent = d;
d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usage);
if (!d) {
err = -ENOMEM;
goto err_out;
}
d = debugfs_create_file("rate", S_IRUGO, c->dent, c, &clock_rate_fops);
if (!d) {
err = -ENOMEM;
goto err_out;
}
return 0;
err_out:
debugfs_remove_recursive(c->dent);
return err;
}
static int clk_debugfs_register(struct clk *c)
{
int err;
struct clk *pa = c->parent;
if (pa && !pa->dent) {
err = clk_debugfs_register(pa);
if (err)
return err;
}
if (!c->dent) {
err = clk_debugfs_register_one(c);
if (err)
return err;
}
return 0;
}
static int __init clk_debugfs_init(void)
{
struct clk *c;
struct dentry *d;
int err = -ENOMEM;
d = debugfs_create_dir("clock", NULL);
if (!d)
return -ENOMEM;
clk_debugfs_root = d;
d = debugfs_create_file("clock_tree", S_IRUGO, clk_debugfs_root, NULL,
&clock_tree_fops);
if (!d)
goto err_out;
list_for_each_entry(c, &clocks, list) {
err = clk_debugfs_register(c);
if (err)
goto err_out;
}
return 0;
err_out:
debugfs_remove_recursive(clk_debugfs_root);
return err;
}
late_initcall(clk_debugfs_init);
#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
| gpl-2.0 |
OLIMEX/linux-3.12.10-ti2013.12.01-am3352_som | arch/arm/mach-s5pv210/clock.c | 524 | 34172 | /* linux/arch/arm/mach-s5pv210/clock.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV210 - Clock support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <mach/map.h>
#include <plat/cpu-freq.h>
#include <mach/regs-clock.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
#include "common.h"
static unsigned long xtal;
static struct clksrc_clk clk_mout_apll = {
.clk = {
.name = "mout_apll",
},
.sources = &clk_src_apll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
};
static struct clksrc_clk clk_mout_epll = {
.clk = {
.name = "mout_epll",
},
.sources = &clk_src_epll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
};
static struct clksrc_clk clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
},
.sources = &clk_src_mpll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
};
static struct clk *clkset_armclk_list[] = {
[0] = &clk_mout_apll.clk,
[1] = &clk_mout_mpll.clk,
};
static struct clksrc_sources clkset_armclk = {
.sources = clkset_armclk_list,
.nr_sources = ARRAY_SIZE(clkset_armclk_list),
};
static struct clksrc_clk clk_armclk = {
.clk = {
.name = "armclk",
},
.sources = &clkset_armclk,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 1 },
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 3 },
};
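/*
 * Added reading of the descriptor above (illustration only): armclk is
 * a one-bit mux in CLK_SRC0 bit 16 selecting mout_apll (0) or mout_mpll
 * (1), followed by a 3-bit divider in CLK_DIV0 bits 2..0; the common
 * clksrc code is assumed to derive rate = parent_rate / (field + 1).
 */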
static struct clksrc_clk clk_hclk_msys = {
.clk = {
.name = "hclk_msys",
.parent = &clk_armclk.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 },
};
static struct clksrc_clk clk_pclk_msys = {
.clk = {
.name = "pclk_msys",
.parent = &clk_hclk_msys.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 },
};
static struct clksrc_clk clk_sclk_a2m = {
.clk = {
.name = "sclk_a2m",
.parent = &clk_mout_apll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
};
static struct clk *clkset_hclk_sys_list[] = {
[0] = &clk_mout_mpll.clk,
[1] = &clk_sclk_a2m.clk,
};
static struct clksrc_sources clkset_hclk_sys = {
.sources = clkset_hclk_sys_list,
.nr_sources = ARRAY_SIZE(clkset_hclk_sys_list),
};
static struct clksrc_clk clk_hclk_dsys = {
.clk = {
.name = "hclk_dsys",
},
.sources = &clkset_hclk_sys,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 },
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 4 },
};
static struct clksrc_clk clk_pclk_dsys = {
.clk = {
.name = "pclk_dsys",
.parent = &clk_hclk_dsys.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 },
};
static struct clksrc_clk clk_hclk_psys = {
.clk = {
.name = "hclk_psys",
},
.sources = &clkset_hclk_sys,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 },
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 24, .size = 4 },
};
static struct clksrc_clk clk_pclk_psys = {
.clk = {
.name = "pclk_psys",
.parent = &clk_hclk_psys.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 },
};
static int s5pv210_clk_ip0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable);
}
static int s5pv210_clk_ip1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP1, clk, enable);
}
static int s5pv210_clk_ip2_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP2, clk, enable);
}
static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
}
static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
}
static int s5pv210_clk_mask1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLK_SRC_MASK1, clk, enable);
}
static int s5pv210_clk_hdmiphy_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
}
static int exynos4_clk_dac_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_DAC_PHY_CONTROL, clk, enable);
}
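/*
 * Added note: each helper above gates one set of IP blocks.
 * s5p_gatectrl() is assumed to set or clear clk->ctrlbit in the given
 * CLKGATE/SRC_MASK register, so a clock only needs .enable and
 * .ctrlbit filled in to become gateable.
 */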
static struct clk clk_sclk_hdmi27m = {
.name = "sclk_hdmi27m",
.rate = 27000000,
};
static struct clk clk_sclk_hdmiphy = {
.name = "sclk_hdmiphy",
};
static struct clk clk_sclk_usbphy0 = {
.name = "sclk_usbphy0",
};
static struct clk clk_sclk_usbphy1 = {
.name = "sclk_usbphy1",
};
static struct clk clk_pcmcdclk0 = {
.name = "pcmcdclk",
};
static struct clk clk_pcmcdclk1 = {
.name = "pcmcdclk",
};
static struct clk clk_pcmcdclk2 = {
.name = "pcmcdclk",
};
static struct clk *clkset_vpllsrc_list[] = {
[0] = &clk_fin_vpll,
[1] = &clk_sclk_hdmi27m,
};
static struct clksrc_sources clkset_vpllsrc = {
.sources = clkset_vpllsrc_list,
.nr_sources = ARRAY_SIZE(clkset_vpllsrc_list),
};
static struct clksrc_clk clk_vpllsrc = {
.clk = {
.name = "vpll_src",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 7),
},
.sources = &clkset_vpllsrc,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 28, .size = 1 },
};
static struct clk *clkset_sclk_vpll_list[] = {
[0] = &clk_vpllsrc.clk,
[1] = &clk_fout_vpll,
};
static struct clksrc_sources clkset_sclk_vpll = {
.sources = clkset_sclk_vpll_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_vpll_list),
};
static struct clksrc_clk clk_sclk_vpll = {
.clk = {
.name = "sclk_vpll",
},
.sources = &clkset_sclk_vpll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 },
};
static struct clk *clkset_moutdmc0src_list[] = {
[0] = &clk_sclk_a2m.clk,
[1] = &clk_mout_mpll.clk,
[2] = NULL,
[3] = NULL,
};
static struct clksrc_sources clkset_moutdmc0src = {
.sources = clkset_moutdmc0src_list,
.nr_sources = ARRAY_SIZE(clkset_moutdmc0src_list),
};
static struct clksrc_clk clk_mout_dmc0 = {
.clk = {
.name = "mout_dmc0",
},
.sources = &clkset_moutdmc0src,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 },
};
static struct clksrc_clk clk_sclk_dmc0 = {
.clk = {
.name = "sclk_dmc0",
.parent = &clk_mout_dmc0.clk,
},
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 },
};
static unsigned long s5pv210_clk_imem_get_rate(struct clk *clk)
{
return clk_get_rate(clk->parent) / 2;
}
static struct clk_ops clk_hclk_imem_ops = {
.get_rate = s5pv210_clk_imem_get_rate,
};
static unsigned long s5pv210_clk_fout_apll_get_rate(struct clk *clk)
{
return s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
}
static struct clk_ops clk_fout_apll_ops = {
.get_rate = s5pv210_clk_fout_apll_get_rate,
};
static struct clk init_clocks_off[] = {
{
.name = "rot",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1<<29),
}, {
.name = "fimc",
.devname = "s5pv210-fimc.0",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 24),
}, {
.name = "fimc",
.devname = "s5pv210-fimc.1",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 25),
}, {
.name = "fimc",
.devname = "s5pv210-fimc.2",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 26),
}, {
.name = "jpeg",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 28),
}, {
.name = "mfc",
.devname = "s5p-mfc",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "dac",
.devname = "s5p-sdo",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "mixer",
.devname = "s5p-mixer",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "vp",
.devname = "s5p-mixer",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "hdmi",
.devname = "s5pv210-hdmi",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "hdmiphy",
.devname = "s5pv210-hdmi",
.enable = s5pv210_clk_hdmiphy_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "dacphy",
.devname = "s5p-sdo",
.enable = exynos4_clk_dac_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "otg",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "usb-host",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<17),
}, {
.name = "lcd",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<0),
}, {
.name = "cfcon",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<25),
}, {
.name = "systimer",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "watchdog",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<22),
}, {
.name = "rtc",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<15),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.0",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<7),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.1",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.2",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<9),
}, {
.name = "i2c",
.devname = "s3c2440-hdmiphy-i2c",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "spi",
.devname = "s5pv210-spi.0",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<12),
}, {
.name = "spi",
.devname = "s5pv210-spi.1",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<13),
}, {
.name = "spi",
.devname = "s5pv210-spi.2",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<14),
}, {
.name = "timers",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<23),
}, {
.name = "adc",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<24),
}, {
.name = "keypad",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<21),
}, {
.name = "iis",
.devname = "samsung-i2s.0",
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<4),
}, {
.name = "iis",
.devname = "samsung-i2s.1",
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "iis",
.devname = "samsung-i2s.2",
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "spdif",
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 0),
},
};
static struct clk init_clocks[] = {
{
.name = "hclk_imem",
.parent = &clk_hclk_msys.clk,
.ctrlbit = (1 << 5),
.enable = s5pv210_clk_ip0_ctrl,
.ops = &clk_hclk_imem_ops,
}, {
.name = "uart",
.devname = "s5pv210-uart.0",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "uart",
.devname = "s5pv210-uart.1",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "uart",
.devname = "s5pv210-uart.2",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 19),
}, {
.name = "uart",
.devname = "s5pv210-uart.3",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 20),
}, {
.name = "sromc",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1 << 26),
},
};
static struct clk clk_hsmmc0 = {
.name = "hsmmc",
.devname = "s3c-sdhci.0",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<16),
};
static struct clk clk_hsmmc1 = {
.name = "hsmmc",
.devname = "s3c-sdhci.1",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<17),
};
static struct clk clk_hsmmc2 = {
.name = "hsmmc",
.devname = "s3c-sdhci.2",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<18),
};
static struct clk clk_hsmmc3 = {
.name = "hsmmc",
.devname = "s3c-sdhci.3",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<19),
};
static struct clk clk_pdma0 = {
.name = "pdma0",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 3),
};
static struct clk clk_pdma1 = {
.name = "pdma1",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 4),
};
static struct clk *clkset_uart_list[] = {
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
};
static struct clksrc_sources clkset_uart = {
.sources = clkset_uart_list,
.nr_sources = ARRAY_SIZE(clkset_uart_list),
};
static struct clk *clkset_group1_list[] = {
[0] = &clk_sclk_a2m.clk,
[1] = &clk_mout_mpll.clk,
[2] = &clk_mout_epll.clk,
[3] = &clk_sclk_vpll.clk,
};
static struct clksrc_sources clkset_group1 = {
.sources = clkset_group1_list,
.nr_sources = ARRAY_SIZE(clkset_group1_list),
};
static struct clk *clkset_sclk_onenand_list[] = {
[0] = &clk_hclk_psys.clk,
[1] = &clk_hclk_dsys.clk,
};
static struct clksrc_sources clkset_sclk_onenand = {
.sources = clkset_sclk_onenand_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_onenand_list),
};
static struct clk *clkset_sclk_dac_list[] = {
[0] = &clk_sclk_vpll.clk,
[1] = &clk_sclk_hdmiphy,
};
static struct clksrc_sources clkset_sclk_dac = {
.sources = clkset_sclk_dac_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_dac_list),
};
static struct clksrc_clk clk_sclk_dac = {
.clk = {
.name = "sclk_dac",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 2),
},
.sources = &clkset_sclk_dac,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 1 },
};
static struct clksrc_clk clk_sclk_pixel = {
.clk = {
.name = "sclk_pixel",
.parent = &clk_sclk_vpll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 4},
};
static struct clk *clkset_sclk_hdmi_list[] = {
[0] = &clk_sclk_pixel.clk,
[1] = &clk_sclk_hdmiphy,
};
static struct clksrc_sources clkset_sclk_hdmi = {
.sources = clkset_sclk_hdmi_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_hdmi_list),
};
static struct clksrc_clk clk_sclk_hdmi = {
.clk = {
.name = "sclk_hdmi",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &clkset_sclk_hdmi,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 },
};
static struct clk *clkset_sclk_mixer_list[] = {
[0] = &clk_sclk_dac.clk,
[1] = &clk_sclk_hdmi.clk,
};
static struct clksrc_sources clkset_sclk_mixer = {
.sources = clkset_sclk_mixer_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_mixer_list),
};
static struct clksrc_clk clk_sclk_mixer = {
.clk = {
.name = "sclk_mixer",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 1),
},
.sources = &clkset_sclk_mixer,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 1 },
};
static struct clksrc_clk *sclk_tv[] = {
&clk_sclk_dac,
&clk_sclk_pixel,
&clk_sclk_hdmi,
&clk_sclk_mixer,
};
static struct clk *clkset_sclk_audio0_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_pcmcdclk0,
[2] = &clk_sclk_hdmi27m,
[3] = &clk_sclk_usbphy0,
[4] = &clk_sclk_usbphy1,
[5] = &clk_sclk_hdmiphy,
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
[8] = &clk_sclk_vpll.clk,
};
static struct clksrc_sources clkset_sclk_audio0 = {
.sources = clkset_sclk_audio0_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_audio0_list),
};
static struct clksrc_clk clk_sclk_audio0 = {
.clk = {
.name = "sclk_audio",
.devname = "soc-audio.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &clkset_sclk_audio0,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 0, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 0, .size = 4 },
};
static struct clk *clkset_sclk_audio1_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_pcmcdclk1,
[2] = &clk_sclk_hdmi27m,
[3] = &clk_sclk_usbphy0,
[4] = &clk_sclk_usbphy1,
[5] = &clk_sclk_hdmiphy,
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
[8] = &clk_sclk_vpll.clk,
};
static struct clksrc_sources clkset_sclk_audio1 = {
.sources = clkset_sclk_audio1_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_audio1_list),
};
static struct clksrc_clk clk_sclk_audio1 = {
.clk = {
.name = "sclk_audio",
.devname = "soc-audio.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 25),
},
.sources = &clkset_sclk_audio1,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 4, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 4, .size = 4 },
};
static struct clk *clkset_sclk_audio2_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_pcmcdclk0,
[2] = &clk_sclk_hdmi27m,
[3] = &clk_sclk_usbphy0,
[4] = &clk_sclk_usbphy1,
[5] = &clk_sclk_hdmiphy,
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
[8] = &clk_sclk_vpll.clk,
};
static struct clksrc_sources clkset_sclk_audio2 = {
.sources = clkset_sclk_audio2_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_audio2_list),
};
static struct clksrc_clk clk_sclk_audio2 = {
.clk = {
.name = "sclk_audio",
.devname = "soc-audio.2",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 26),
},
.sources = &clkset_sclk_audio2,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 8, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 8, .size = 4 },
};
static struct clk *clkset_sclk_spdif_list[] = {
[0] = &clk_sclk_audio0.clk,
[1] = &clk_sclk_audio1.clk,
[2] = &clk_sclk_audio2.clk,
};
static struct clksrc_sources clkset_sclk_spdif = {
.sources = clkset_sclk_spdif_list,
.nr_sources = ARRAY_SIZE(clkset_sclk_spdif_list),
};
static struct clksrc_clk clk_sclk_spdif = {
.clk = {
.name = "sclk_spdif",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 27),
.ops = &s5p_sclk_spdif_ops,
},
.sources = &clkset_sclk_spdif,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 12, .size = 2 },
};
static struct clk *clkset_group2_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_xusbxti,
[2] = &clk_sclk_hdmi27m,
[3] = &clk_sclk_usbphy0,
[4] = &clk_sclk_usbphy1,
[5] = &clk_sclk_hdmiphy,
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
[8] = &clk_sclk_vpll.clk,
};
static struct clksrc_sources clkset_group2 = {
.sources = clkset_group2_list,
.nr_sources = ARRAY_SIZE(clkset_group2_list),
};
static struct clksrc_clk clksrcs[] = {
{
.clk = {
.name = "sclk_dmc",
},
.sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 },
}, {
.clk = {
.name = "sclk_onenand",
},
.sources = &clkset_sclk_onenand,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 28, .size = 1 },
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 12, .size = 3 },
}, {
.clk = {
.name = "sclk_fimc",
.devname = "s5pv210-fimc.0",
.enable = s5pv210_clk_mask1_ctrl,
.ctrlbit = (1 << 2),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 12, .size = 4 },
}, {
.clk = {
.name = "sclk_fimc",
.devname = "s5pv210-fimc.1",
.enable = s5pv210_clk_mask1_ctrl,
.ctrlbit = (1 << 3),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 16, .size = 4 },
}, {
.clk = {
.name = "sclk_fimc",
.devname = "s5pv210-fimc.2",
.enable = s5pv210_clk_mask1_ctrl,
.ctrlbit = (1 << 4),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 },
}, {
.clk = {
.name = "sclk_cam0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 3),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 },
}, {
.clk = {
.name = "sclk_cam1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 4),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 16, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 16, .size = 4 },
}, {
.clk = {
.name = "sclk_fimd",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 5),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 20, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 4 },
}, {
.clk = {
.name = "sclk_mfc",
.devname = "s5p-mfc",
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 16),
},
.sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 },
}, {
.clk = {
.name = "sclk_g2d",
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 12),
},
.sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 },
}, {
.clk = {
.name = "sclk_g3d",
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 8),
},
.sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 },
}, {
.clk = {
.name = "sclk_csis",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 6),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 28, .size = 4 },
}, {
.clk = {
.name = "sclk_pwi",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 29),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 20, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 24, .size = 4 },
}, {
.clk = {
.name = "sclk_pwm",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 19),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC5, .shift = 12, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV5, .shift = 12, .size = 4 },
},
};
static struct clksrc_clk clk_sclk_uart0 = {
.clk = {
.name = "uclk1",
.devname = "s5pv210-uart.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 12),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 16, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 },
};
static struct clksrc_clk clk_sclk_uart1 = {
.clk = {
.name = "uclk1",
.devname = "s5pv210-uart.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 13),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 20, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 },
};
static struct clksrc_clk clk_sclk_uart2 = {
.clk = {
.name = "uclk1",
.devname = "s5pv210-uart.2",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 14),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 24, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 24, .size = 4 },
};
static struct clksrc_clk clk_sclk_uart3 = {
.clk = {
.name = "uclk1",
.devname = "s5pv210-uart.3",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 15),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 28, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 28, .size = 4 },
};
static struct clksrc_clk clk_sclk_mmc0 = {
.clk = {
.name = "sclk_mmc",
.devname = "s3c-sdhci.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 8),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 0, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 4 },
};
static struct clksrc_clk clk_sclk_mmc1 = {
.clk = {
.name = "sclk_mmc",
.devname = "s3c-sdhci.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 9),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 4, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 4, .size = 4 },
};
static struct clksrc_clk clk_sclk_mmc2 = {
.clk = {
.name = "sclk_mmc",
.devname = "s3c-sdhci.2",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 10),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 8, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 8, .size = 4 },
};
static struct clksrc_clk clk_sclk_mmc3 = {
.clk = {
.name = "sclk_mmc",
.devname = "s3c-sdhci.3",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 11),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 12, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 },
};
static struct clksrc_clk clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
.devname = "s5pv210-spi.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 16),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC5, .shift = 0, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV5, .shift = 0, .size = 4 },
};
static struct clksrc_clk clk_sclk_spi1 = {
.clk = {
.name = "sclk_spi",
.devname = "s5pv210-spi.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 17),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC5, .shift = 4, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV5, .shift = 4, .size = 4 },
};
static struct clksrc_clk *clksrc_cdev[] = {
&clk_sclk_uart0,
&clk_sclk_uart1,
&clk_sclk_uart2,
&clk_sclk_uart3,
&clk_sclk_mmc0,
&clk_sclk_mmc1,
&clk_sclk_mmc2,
&clk_sclk_mmc3,
&clk_sclk_spi0,
&clk_sclk_spi1,
};
static struct clk *clk_cdev[] = {
&clk_hsmmc0,
&clk_hsmmc1,
&clk_hsmmc2,
&clk_hsmmc3,
&clk_pdma0,
&clk_pdma1,
};
/* Clock initialisation code */
static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
&clk_mout_epll,
&clk_mout_mpll,
&clk_armclk,
&clk_hclk_msys,
&clk_sclk_a2m,
&clk_hclk_dsys,
&clk_hclk_psys,
&clk_pclk_msys,
&clk_pclk_dsys,
&clk_pclk_psys,
&clk_vpllsrc,
&clk_sclk_vpll,
&clk_mout_dmc0,
&clk_sclk_dmc0,
&clk_sclk_audio0,
&clk_sclk_audio1,
&clk_sclk_audio2,
&clk_sclk_spdif,
};
static u32 epll_div[][6] = {
{ 48000000, 0, 48, 3, 3, 0 },
{ 96000000, 0, 48, 3, 2, 0 },
{ 144000000, 1, 72, 3, 2, 0 },
{ 192000000, 0, 48, 3, 1, 0 },
{ 288000000, 1, 72, 3, 1, 0 },
{ 32750000, 1, 65, 3, 4, 35127 },
{ 32768000, 1, 65, 3, 4, 35127 },
{ 45158400, 0, 45, 3, 3, 10355 },
{ 45000000, 0, 45, 3, 3, 10355 },
{ 45158000, 0, 45, 3, 3, 10355 },
{ 49125000, 0, 49, 3, 3, 9961 },
{ 49152000, 0, 49, 3, 3, 9961 },
{ 67737600, 1, 67, 3, 3, 48366 },
{ 67738000, 1, 67, 3, 3, 48366 },
{ 73800000, 1, 73, 3, 3, 47710 },
{ 73728000, 1, 73, 3, 3, 47710 },
{ 36000000, 1, 32, 3, 4, 0 },
{ 60000000, 1, 60, 3, 3, 0 },
{ 72000000, 1, 72, 3, 3, 0 },
{ 80000000, 1, 80, 3, 3, 0 },
{ 84000000, 0, 42, 3, 2, 0 },
{ 50000000, 0, 50, 3, 3, 0 },
};
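/*
 * Added note (illustration): each row above is
 * { target_rate, VSEL, MDIV, PDIV, SDIV, KDIV }, applied by
 * s5pv210_epll_set_rate() below.  Assuming the usual PLL46xx relation
 * Fout = (MDIV + KDIV/65536) * Fin / (PDIV * 2^SDIV) with Fin = 24 MHz:
 *
 *   { 96000000, 0, 48, 3, 2, 0 }     -> 48 * 24 MHz / (3 * 4) = 96 MHz
 *   { 32768000, 1, 65, 3, 4, 35127 } -> (65 + 35127/65536) * 24 MHz / 48
 *                                       = 32.768 MHz
 */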
static int s5pv210_epll_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int epll_con, epll_con_k;
unsigned int i;
/* Return if nothing changed */
if (clk->rate == rate)
return 0;
epll_con = __raw_readl(S5P_EPLL_CON);
epll_con_k = __raw_readl(S5P_EPLL_CON1);
epll_con_k &= ~PLL46XX_KDIV_MASK;
epll_con &= ~(1 << 27 |
PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT |
PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT |
PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT);
for (i = 0; i < ARRAY_SIZE(epll_div); i++) {
if (epll_div[i][0] == rate) {
epll_con_k |= epll_div[i][5] << 0;
epll_con |= (epll_div[i][1] << 27 |
epll_div[i][2] << PLL46XX_MDIV_SHIFT |
epll_div[i][3] << PLL46XX_PDIV_SHIFT |
epll_div[i][4] << PLL46XX_SDIV_SHIFT);
break;
}
}
if (i == ARRAY_SIZE(epll_div)) {
printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n",
__func__);
return -EINVAL;
}
__raw_writel(epll_con, S5P_EPLL_CON);
__raw_writel(epll_con_k, S5P_EPLL_CON1);
printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n",
clk->rate, rate);
clk->rate = rate;
return 0;
}
static struct clk_ops s5pv210_epll_ops = {
.set_rate = s5pv210_epll_set_rate,
.get_rate = s5p_epll_get_rate,
};
static u32 vpll_div[][5] = {
{ 54000000, 3, 53, 3, 0 },
{ 108000000, 3, 53, 2, 0 },
};
static unsigned long s5pv210_vpll_get_rate(struct clk *clk)
{
return clk->rate;
}
static int s5pv210_vpll_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int vpll_con;
unsigned int i;
/* Return if nothing changed */
if (clk->rate == rate)
return 0;
vpll_con = __raw_readl(S5P_VPLL_CON);
vpll_con &= ~(0x1 << 27 | \
PLL90XX_MDIV_MASK << PLL90XX_MDIV_SHIFT | \
PLL90XX_PDIV_MASK << PLL90XX_PDIV_SHIFT | \
PLL90XX_SDIV_MASK << PLL90XX_SDIV_SHIFT);
for (i = 0; i < ARRAY_SIZE(vpll_div); i++) {
if (vpll_div[i][0] == rate) {
vpll_con |= vpll_div[i][1] << PLL90XX_PDIV_SHIFT;
vpll_con |= vpll_div[i][2] << PLL90XX_MDIV_SHIFT;
vpll_con |= vpll_div[i][3] << PLL90XX_SDIV_SHIFT;
vpll_con |= vpll_div[i][4] << 27;
break;
}
}
if (i == ARRAY_SIZE(vpll_div)) {
printk(KERN_ERR "%s: Invalid Clock VPLL Frequency\n",
__func__);
return -EINVAL;
}
__raw_writel(vpll_con, S5P_VPLL_CON);
/* Wait for VPLL lock */
while (!(__raw_readl(S5P_VPLL_CON) & (1 << PLL90XX_LOCKED_SHIFT)))
continue;
clk->rate = rate;
return 0;
}
static struct clk_ops s5pv210_vpll_ops = {
.get_rate = s5pv210_vpll_get_rate,
.set_rate = s5pv210_vpll_set_rate,
};
void __init_or_cpufreq s5pv210_setup_clocks(void)
{
struct clk *xtal_clk;
unsigned long vpllsrc;
unsigned long armclk;
unsigned long hclk_msys;
unsigned long hclk_dsys;
unsigned long hclk_psys;
unsigned long pclk_msys;
unsigned long pclk_dsys;
unsigned long pclk_psys;
unsigned long apll;
unsigned long mpll;
unsigned long epll;
unsigned long vpll;
unsigned int ptr;
u32 clkdiv0, clkdiv1;
/* Set functions for clk_fout_epll */
clk_fout_epll.enable = s5p_epll_enable;
clk_fout_epll.ops = &s5pv210_epll_ops;
printk(KERN_DEBUG "%s: registering clocks\n", __func__);
clkdiv0 = __raw_readl(S5P_CLK_DIV0);
clkdiv1 = __raw_readl(S5P_CLK_DIV1);
printk(KERN_DEBUG "%s: clkdiv0 = %08x, clkdiv1 = %08x\n",
__func__, clkdiv0, clkdiv1);
xtal_clk = clk_get(NULL, "xtal");
BUG_ON(IS_ERR(xtal_clk));
xtal = clk_get_rate(xtal_clk);
clk_put(xtal_clk);
printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON),
__raw_readl(S5P_EPLL_CON1), pll_4600);
vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
vpll = s5p_get_pll45xx(vpllsrc, __raw_readl(S5P_VPLL_CON), pll_4502);
clk_fout_apll.ops = &clk_fout_apll_ops;
clk_fout_mpll.rate = mpll;
clk_fout_epll.rate = epll;
clk_fout_vpll.ops = &s5pv210_vpll_ops;
clk_fout_vpll.rate = vpll;
printk(KERN_INFO "S5PV210: PLL settings, A=%ld, M=%ld, E=%ld V=%ld",
apll, mpll, epll, vpll);
armclk = clk_get_rate(&clk_armclk.clk);
hclk_msys = clk_get_rate(&clk_hclk_msys.clk);
hclk_dsys = clk_get_rate(&clk_hclk_dsys.clk);
hclk_psys = clk_get_rate(&clk_hclk_psys.clk);
pclk_msys = clk_get_rate(&clk_pclk_msys.clk);
pclk_dsys = clk_get_rate(&clk_pclk_dsys.clk);
pclk_psys = clk_get_rate(&clk_pclk_psys.clk);
printk(KERN_INFO "S5PV210: ARMCLK=%ld, HCLKM=%ld, HCLKD=%ld\n"
"HCLKP=%ld, PCLKM=%ld, PCLKD=%ld, PCLKP=%ld\n",
armclk, hclk_msys, hclk_dsys, hclk_psys,
pclk_msys, pclk_dsys, pclk_psys);
clk_f.rate = armclk;
clk_h.rate = hclk_psys;
clk_p.rate = pclk_psys;
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_set_clksrc(&clksrcs[ptr], true);
}
static struct clk *clks[] __initdata = {
&clk_sclk_hdmi27m,
&clk_sclk_hdmiphy,
&clk_sclk_usbphy0,
&clk_sclk_usbphy1,
&clk_pcmcdclk0,
&clk_pcmcdclk1,
&clk_pcmcdclk2,
};
static struct clk_lookup s5pv210_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud0", &clk_p),
CLKDEV_INIT("s5pv210-uart.0", "clk_uart_baud1", &clk_sclk_uart0.clk),
CLKDEV_INIT("s5pv210-uart.1", "clk_uart_baud1", &clk_sclk_uart1.clk),
CLKDEV_INIT("s5pv210-uart.2", "clk_uart_baud1", &clk_sclk_uart2.clk),
CLKDEV_INIT("s5pv210-uart.3", "clk_uart_baud1", &clk_sclk_uart3.clk),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &clk_hsmmc0),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &clk_hsmmc1),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.0", &clk_hsmmc2),
CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.0", &clk_hsmmc3),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &clk_sclk_mmc3.clk),
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
};
void __init s5pv210_register_clocks(void)
{
int ptr;
s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
s3c_register_clksrc(sysclks[ptr], 1);
for (ptr = 0; ptr < ARRAY_SIZE(sclk_tv); ptr++)
s3c_register_clksrc(sclk_tv[ptr], 1);
for (ptr = 0; ptr < ARRAY_SIZE(clksrc_cdev); ptr++)
s3c_register_clksrc(clksrc_cdev[ptr], 1);
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
clkdev_add_table(s5pv210_clk_lookup, ARRAY_SIZE(s5pv210_clk_lookup));
s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
s3c_disable_clocks(clk_cdev[ptr], 1);
}
| gpl-2.0 |
HCDRJacob/htc-kernel-wildfire-2.6.32 | drivers/net/wireless/orinoco/orinoco_nortel.c | 524 | 8558 | /* orinoco_nortel.c
*
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in
* Nortel emobility, Symbol LA-4113 and Symbol LA-4123.
*
* Copyright (C) 2002 Tobias Hoffmann
* (C) 2003 Christoph Jungegger <disdos@traum404.de>
*
* Some of this code is borrowed from orinoco_plx.c
* Copyright (C) 2001 Daniel Barlow
* Some of this code is borrowed from orinoco_pci.c
* Copyright (C) 2001 Jean Tourrilhes
* Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing
* has been copied from it. linux-wlan-ng-0.1.10 is originally :
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License
* at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and
* limitations under the License.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU General Public License version 2 (the "GPL"), in
* which case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice and
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL.
*/
#define DRIVER_NAME "orinoco_nortel"
#define PFX DRIVER_NAME ": "
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <pcmcia/cisreg.h>
#include "orinoco.h"
#include "orinoco_pci.h"
#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
/*
 * Do a soft reset of the card using the Configuration Option Register.
 * We need this to get going...
 * This is the part of the code that is strongly inspired by wlan-ng.
 *
 * Also note: don't try to access HERMES_CMD during the reset phase.
 * It just won't work!
 */
static int orinoco_nortel_cor_reset(struct orinoco_private *priv)
{
struct orinoco_pci_card *card = priv->card;
/* Assert the reset until the card notices */
iowrite16(8, card->bridge_io + 2);
ioread16(card->attr_io + COR_OFFSET);
iowrite16(0x80, card->attr_io + COR_OFFSET);
mdelay(1);
/* Give time for the card to recover from this hard effort */
iowrite16(0, card->attr_io + COR_OFFSET);
iowrite16(0, card->attr_io + COR_OFFSET);
mdelay(1);
/* Set COR as usual */
iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
iowrite16(0x228, card->bridge_io + 2);
return 0;
}
static int orinoco_nortel_hw_init(struct orinoco_pci_card *card)
{
int i;
u32 reg;
/* Setup bridge */
if (ioread16(card->bridge_io) & 1) {
printk(KERN_ERR PFX "brg1 answer1 wrong\n");
return -EBUSY;
}
iowrite16(0x118, card->bridge_io + 2);
iowrite16(0x108, card->bridge_io + 2);
mdelay(30);
iowrite16(0x8, card->bridge_io + 2);
for (i = 0; i < 30; i++) {
mdelay(30);
if (ioread16(card->bridge_io) & 0x10)
break;
}
if (i == 30) {
printk(KERN_ERR PFX "brg1 timed out\n");
return -EBUSY;
}
if (ioread16(card->attr_io + COR_OFFSET) & 1) {
printk(KERN_ERR PFX "brg2 answer1 wrong\n");
return -EBUSY;
}
if (ioread16(card->attr_io + COR_OFFSET + 2) & 1) {
printk(KERN_ERR PFX "brg2 answer2 wrong\n");
return -EBUSY;
}
if (ioread16(card->attr_io + COR_OFFSET + 4) & 1) {
printk(KERN_ERR PFX "brg2 answer3 wrong\n");
return -EBUSY;
}
/* Set the PCMCIA COR register */
iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
reg = ioread16(card->attr_io + COR_OFFSET);
if (reg != COR_VALUE) {
printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n",
reg);
return -EBUSY;
}
/* Set LEDs */
iowrite16(1, card->bridge_io + 10);
return 0;
}
static int orinoco_nortel_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err;
struct orinoco_private *priv;
struct orinoco_pci_card *card;
void __iomem *hermes_io, *bridge_io, *attr_io;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR PFX "Cannot enable PCI device\n");
return err;
}
err = pci_request_regions(pdev, DRIVER_NAME);
if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
bridge_io = pci_iomap(pdev, 0, 0);
if (!bridge_io) {
printk(KERN_ERR PFX "Cannot map bridge registers\n");
err = -EIO;
goto fail_map_bridge;
}
attr_io = pci_iomap(pdev, 1, 0);
if (!attr_io) {
printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
err = -EIO;
goto fail_map_attr;
}
hermes_io = pci_iomap(pdev, 2, 0);
if (!hermes_io) {
printk(KERN_ERR PFX "Cannot map chipset registers\n");
err = -EIO;
goto fail_map_hermes;
}
/* Allocate network device */
priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
orinoco_nortel_cor_reset, NULL);
if (!priv) {
printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
goto fail_alloc;
}
card = priv->card;
card->bridge_io = bridge_io;
card->attr_io = attr_io;
hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
DRIVER_NAME, priv);
if (err) {
printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
err = -EBUSY;
goto fail_irq;
}
err = orinoco_nortel_hw_init(card);
if (err) {
printk(KERN_ERR PFX "Hardware initialization failed\n");
goto fail;
}
err = orinoco_nortel_cor_reset(priv);
if (err) {
printk(KERN_ERR PFX "Initial reset failed\n");
goto fail;
}
err = orinoco_init(priv);
if (err) {
printk(KERN_ERR PFX "orinoco_init() failed\n");
goto fail;
}
err = orinoco_if_add(priv, 0, 0);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
}
pci_set_drvdata(pdev, priv);
return 0;
fail:
free_irq(pdev->irq, priv);
fail_irq:
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
pci_iounmap(pdev, hermes_io);
fail_map_hermes:
pci_iounmap(pdev, attr_io);
fail_map_attr:
pci_iounmap(pdev, bridge_io);
fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
pci_disable_device(pdev);
return err;
}
static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
{
struct orinoco_private *priv = pci_get_drvdata(pdev);
struct orinoco_pci_card *card = priv->card;
/* Clear LEDs */
iowrite16(0, card->bridge_io + 10);
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static struct pci_device_id orinoco_nortel_id_table[] = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
{0x1562, 0x0001, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
MODULE_DEVICE_TABLE(pci, orinoco_nortel_id_table);
static struct pci_driver orinoco_nortel_driver = {
.name = DRIVER_NAME,
.id_table = orinoco_nortel_id_table,
.probe = orinoco_nortel_init_one,
.remove = __devexit_p(orinoco_nortel_remove_one),
.suspend = orinoco_pci_suspend,
.resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
" (Tobias Hoffmann & Christoph Jungegger <disdos@traum404.de>)";
MODULE_AUTHOR("Christoph Jungegger <disdos@traum404.de>");
MODULE_DESCRIPTION
("Driver for wireless LAN cards using the Nortel PCI bridge");
MODULE_LICENSE("Dual MPL/GPL");
static int __init orinoco_nortel_init(void)
{
printk(KERN_DEBUG "%s\n", version);
return pci_register_driver(&orinoco_nortel_driver);
}
static void __exit orinoco_nortel_exit(void)
{
pci_unregister_driver(&orinoco_nortel_driver);
}
module_init(orinoco_nortel_init);
module_exit(orinoco_nortel_exit);
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* tab-width: 8
* End:
*/
| gpl-2.0 |
palmer-dabbelt/linux | sound/pci/emu10k1/emu10k1.c | 1036 | 8593 | /*
* The driver for the EMU10K1 (SB Live!) based soundcards
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
* Copyright (c) by James Courtier-Dutton <James@superbug.demon.co.uk>
* Added support for Audigy 2 Value.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/emu10k1.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("EMU10K1");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB Live!/PCI512/E-mu APS},"
"{Creative Labs,SB Audigy}}");
#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
#define ENABLE_SYNTH
#include <sound/emu10k1_synth.h>
#endif
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
static int extin[SNDRV_CARDS];
static int extout[SNDRV_CARDS];
static int seq_ports[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 4};
static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
static bool enable_ir[SNDRV_CARDS];
static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the EMU10K1 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable the EMU10K1 soundcard.");
module_param_array(extin, int, NULL, 0444);
MODULE_PARM_DESC(extin, "Available external inputs for FX8010. Zero=default.");
module_param_array(extout, int, NULL, 0444);
MODULE_PARM_DESC(extout, "Available external outputs for FX8010. Zero=default.");
module_param_array(seq_ports, int, NULL, 0444);
MODULE_PARM_DESC(seq_ports, "Allocated sequencer ports for internal synthesizer.");
module_param_array(max_synth_voices, int, NULL, 0444);
MODULE_PARM_DESC(max_synth_voices, "Maximum number of voices for WaveTable.");
module_param_array(max_buffer_size, int, NULL, 0444);
MODULE_PARM_DESC(max_buffer_size, "Maximum sample buffer size in MB.");
module_param_array(enable_ir, bool, NULL, 0444);
MODULE_PARM_DESC(enable_ir, "Enable IR.");
module_param_array(subsystem, uint, NULL, 0444);
MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
module_param_array(delay_pcm_irq, uint, NULL, 0444);
MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0).");
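/*
 * Example usage (added for illustration, not part of the original driver):
 * these per-card options are normally given at module load time, e.g.
 *
 *   modprobe snd-emu10k1 index=1 id=Audigy max_buffer_size=64 enable_ir=1
 *
 * The module name snd-emu10k1 is assumed here; the option names match the
 * module_param_array() declarations above, and each array option accepts a
 * comma-separated list with one value per card.
 */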
/*
* Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
*/
static const struct pci_device_id snd_emu10k1_ids[] = {
{ PCI_VDEVICE(CREATIVE, 0x0002), 0 }, /* EMU10K1 */
{ PCI_VDEVICE(CREATIVE, 0x0004), 1 }, /* Audigy */
{ PCI_VDEVICE(CREATIVE, 0x0008), 1 }, /* Audigy 2 Value SB0400 */
{ 0, }
};
/*
* Audigy 2 Value notes:
* A_IOCFG Input (GPIO)
* 0x400 = Front analog jack plugged in. (Green socket)
* 0x1000 = Rear analog jack plugged in. (Black socket)
* 0x2000 = Center/LFE analog jack plugged in. (Orange socket)
* A_IOCFG Output (GPIO)
* 0x60 = Sound out of front Left.
* Win sets it to 0xXX61
*/
MODULE_DEVICE_TABLE(pci, snd_emu10k1_ids);
static int snd_card_emu10k1_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
struct snd_emu10k1 *emu;
#ifdef ENABLE_SYNTH
struct snd_seq_device *wave = NULL;
#endif
int err;
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
0, &card);
if (err < 0)
return err;
if (max_buffer_size[dev] < 32)
max_buffer_size[dev] = 32;
else if (max_buffer_size[dev] > 1024)
max_buffer_size[dev] = 1024;
if ((err = snd_emu10k1_create(card, pci, extin[dev], extout[dev],
(long)max_buffer_size[dev] * 1024 * 1024,
enable_ir[dev], subsystem[dev],
&emu)) < 0)
goto error;
card->private_data = emu;
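/* Note (added): the mask on the next line keeps delay_pcm_irq in the range
 * 0..31 samples; larger requested values are silently truncated, not rejected.
 */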
emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
if ((err = snd_emu10k1_pcm(emu, 0)) < 0)
goto error;
if ((err = snd_emu10k1_pcm_mic(emu, 1)) < 0)
goto error;
if ((err = snd_emu10k1_pcm_efx(emu, 2)) < 0)
goto error;
/* This stores the periods table. */
if (emu->card_capabilities->ca0151_chip) { /* P16V */
if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
1024, &emu->p16v_buffer)) < 0)
goto error;
}
if ((err = snd_emu10k1_mixer(emu, 0, 3)) < 0)
goto error;
if ((err = snd_emu10k1_timer(emu, 0)) < 0)
goto error;
if ((err = snd_emu10k1_pcm_multi(emu, 3)) < 0)
goto error;
if (emu->card_capabilities->ca0151_chip) { /* P16V */
if ((err = snd_p16v_pcm(emu, 4)) < 0)
goto error;
}
if (emu->audigy) {
if ((err = snd_emu10k1_audigy_midi(emu)) < 0)
goto error;
} else {
if ((err = snd_emu10k1_midi(emu)) < 0)
goto error;
}
if ((err = snd_emu10k1_fx8010_new(emu, 0)) < 0)
goto error;
#ifdef ENABLE_SYNTH
if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH,
sizeof(struct snd_emu10k1_synth_arg), &wave) < 0 ||
wave == NULL) {
dev_warn(emu->card->dev,
"can't initialize Emu10k1 wavetable synth\n");
} else {
struct snd_emu10k1_synth_arg *arg;
arg = SNDRV_SEQ_DEVICE_ARGPTR(wave);
strcpy(wave->name, "Emu-10k1 Synth");
arg->hwptr = emu;
arg->index = 1;
arg->seq_ports = seq_ports[dev];
arg->max_voices = max_synth_voices[dev];
}
#endif
strlcpy(card->driver, emu->card_capabilities->driver,
sizeof(card->driver));
strlcpy(card->shortname, emu->card_capabilities->name,
sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
"%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
if ((err = snd_card_register(card)) < 0)
goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
error:
snd_card_free(card);
return err;
}
static void snd_card_emu10k1_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
}
#ifdef CONFIG_PM_SLEEP
static int snd_emu10k1_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct snd_emu10k1 *emu = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
emu->suspend = 1;
snd_pcm_suspend_all(emu->pcm);
snd_pcm_suspend_all(emu->pcm_mic);
snd_pcm_suspend_all(emu->pcm_efx);
snd_pcm_suspend_all(emu->pcm_multi);
snd_pcm_suspend_all(emu->pcm_p16v);
snd_ac97_suspend(emu->ac97);
snd_emu10k1_efx_suspend(emu);
snd_emu10k1_suspend_regs(emu);
if (emu->card_capabilities->ca0151_chip)
snd_p16v_suspend(emu);
snd_emu10k1_done(emu);
return 0;
}
static int snd_emu10k1_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct snd_emu10k1 *emu = card->private_data;
snd_emu10k1_resume_init(emu);
snd_emu10k1_efx_resume(emu);
snd_ac97_resume(emu->ac97);
snd_emu10k1_resume_regs(emu);
if (emu->card_capabilities->ca0151_chip)
snd_p16v_resume(emu);
emu->suspend = 0;
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
static SIMPLE_DEV_PM_OPS(snd_emu10k1_pm, snd_emu10k1_suspend, snd_emu10k1_resume);
#define SND_EMU10K1_PM_OPS &snd_emu10k1_pm
#else
#define SND_EMU10K1_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct pci_driver emu10k1_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_emu10k1_ids,
.probe = snd_card_emu10k1_probe,
.remove = snd_card_emu10k1_remove,
.driver = {
.pm = SND_EMU10K1_PM_OPS,
},
};
module_pci_driver(emu10k1_driver);
| gpl-2.0 |
mythos234/AndromedaN910F-LL | drivers/acpi/glue.c | 1804 | 8722 | /*
* Link physical devices with ACPI devices support
*
* Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com>
* Copyright (c) 2005 Intel Corp.
*
* This file is released under the GPLv2.
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include "internal.h"
#define ACPI_GLUE_DEBUG 0
#if ACPI_GLUE_DEBUG
#define DBG(fmt, ...) \
printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__)
#else
#define DBG(fmt, ...) \
do { \
if (0) \
printk(KERN_DEBUG PREFIX fmt, ##__VA_ARGS__); \
} while (0)
#endif
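/*
 * Note (added for clarity): with ACPI_GLUE_DEBUG set to 0, DBG() still
 * compiles its arguments inside an "if (0)" so the format string and
 * arguments are type-checked, but the printk itself is optimized away.
 */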
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
if (type && type->match && type->find_device) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(register_acpi_bus_type);
int unregister_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return 0;
if (type) {
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
printk(KERN_INFO PREFIX "bus type %s unregistered\n",
type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
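/*
 * Illustrative sketch (added, not part of the original file): a bus glue
 * layer registers itself roughly as below. register_acpi_bus_type() above
 * requires at least .match and .find_device; .setup and .cleanup are
 * optional. The names my_bus_match/my_bus_find_device are hypothetical:
 * .match answers whether a struct device belongs to this bus, and
 * .find_device maps such a device to its acpi_handle.
 *
 *   static struct acpi_bus_type my_acpi_bus = {
 *           .name        = "mybus",
 *           .match       = my_bus_match,
 *           .find_device = my_bus_find_device,
 *   };
 *   register_acpi_bus_type(&my_acpi_bus);
 */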
static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{
struct acpi_bus_type *tmp, *ret = NULL;
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->match(dev)) {
ret = tmp;
break;
}
}
up_read(&bus_type_sem);
return ret;
}
static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **ret_p)
{
struct acpi_device *adev = NULL;
acpi_bus_get_device(handle, &adev);
if (adev) {
*ret_p = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
{
unsigned long long sta;
acpi_status status;
status = acpi_bus_get_status_handle(handle, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return false;
if (is_bridge) {
void *test = NULL;
/* Check if this object has at least one child device. */
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
acpi_dev_present, NULL, NULL, &test);
return !!test;
}
return true;
}
struct find_child_context {
u64 addr;
bool is_bridge;
acpi_handle ret;
bool ret_checked;
};
static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
void *data, void **not_used)
{
struct find_child_context *context = data;
unsigned long long addr;
acpi_status status;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
if (ACPI_FAILURE(status) || addr != context->addr)
return AE_OK;
if (!context->ret) {
/* This is the first matching object. Save its handle. */
context->ret = handle;
return AE_OK;
}
/*
* There is more than one matching object with the same _ADR value.
* That really is unexpected, so we are kind of beyond the scope of the
* spec here. We have to choose which one to return, though.
*
* First, check if the previously found object is good enough and return
* its handle if so. Second, check the same for the object that we've
* just found.
*/
if (!context->ret_checked) {
if (acpi_extra_checks_passed(context->ret, context->is_bridge))
return AE_CTRL_TERMINATE;
else
context->ret_checked = true;
}
if (acpi_extra_checks_passed(handle, context->is_bridge)) {
context->ret = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
{
if (parent) {
struct find_child_context context = {
.addr = addr,
.is_bridge = is_bridge,
};
acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
NULL, &context, NULL);
return context.ret;
}
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_find_child);
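/*
 * Usage note (added): callers such as the PCI glue pass the child's _ADR
 * encoding as @addr; for PCI devices that is (device << 16) | function per
 * the ACPI specification. When @is_bridge is true, the duplicate-_ADR
 * tie-break in acpi_extra_checks_passed() additionally requires the
 * candidate to have at least one child ACPI device.
 */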
static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
int retval = -EINVAL;
if (ACPI_HANDLE(dev)) {
if (handle) {
dev_warn(dev, "ACPI handle is already set\n");
return -EINVAL;
} else {
handle = ACPI_HANDLE(dev);
}
}
if (!handle)
return -EINVAL;
get_device(dev);
status = acpi_bus_get_device(handle, &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
goto err;
}
mutex_lock(&acpi_dev->physical_node_lock);
/* Sanity check. */
list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
if (pn->dev == dev) {
dev_warn(dev, "Already associated with ACPI node\n");
goto err_free;
}
/* allocate physical node id according to physical_node_id_bitmap */
physical_node->node_id =
find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
ACPI_MAX_PHYSICAL_NODE);
if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
retval = -ENOSPC;
goto err_free;
}
set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
physical_node->dev = dev;
list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
acpi_dev->physical_node_count++;
mutex_unlock(&acpi_dev->physical_node_lock);
if (!ACPI_HANDLE(dev))
ACPI_HANDLE_SET(dev, acpi_dev->handle);
if (!physical_node->node_id)
strcpy(physical_node_name, PHYSICAL_NODE_STRING);
else
sprintf(physical_node_name,
"physical_node%d", physical_node->node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
physical_node_name);
retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
"firmware_node");
if (acpi_dev->wakeup.flags.valid)
device_set_wakeup_capable(dev, true);
return 0;
err:
ACPI_HANDLE_SET(dev, NULL);
put_device(dev);
return retval;
err_free:
mutex_unlock(&acpi_dev->physical_node_lock);
kfree(physical_node);
goto err;
}
static int acpi_unbind_one(struct device *dev)
{
struct acpi_device_physical_node *entry;
struct acpi_device *acpi_dev;
acpi_status status;
struct list_head *node, *next;
if (!ACPI_HANDLE(dev))
return 0;
status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
entry = list_entry(node, struct acpi_device_physical_node,
node);
if (entry->dev != dev)
continue;
list_del(node);
clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
acpi_dev->physical_node_count--;
if (!entry->node_id)
strcpy(physical_node_name, PHYSICAL_NODE_STRING);
else
sprintf(physical_node_name,
"physical_node%d", entry->node_id);
sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
ACPI_HANDLE_SET(dev, NULL);
/* acpi_bind_one() increased the refcount by one */
put_device(dev);
kfree(entry);
}
mutex_unlock(&acpi_dev->physical_node_lock);
return 0;
err:
dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
return -EINVAL;
}
static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
acpi_handle handle;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret && type) {
ret = type->find_device(dev, &handle);
if (ret) {
DBG("Unable to get handle for %s\n", dev_name(dev));
goto out;
}
ret = acpi_bind_one(dev, handle);
if (ret)
goto out;
}
if (type && type->setup)
type->setup(dev);
out:
#if ACPI_GLUE_DEBUG
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else
DBG("Device %s -> No ACPI support\n", dev_name(dev));
#endif
return ret;
}
static int acpi_platform_notify_remove(struct device *dev)
{
struct acpi_bus_type *type;
type = acpi_get_bus_type(dev);
if (type && type->cleanup)
type->cleanup(dev);
acpi_unbind_one(dev);
return 0;
}
int __init init_acpi_device_notify(void)
{
if (platform_notify || platform_notify_remove) {
printk(KERN_ERR PREFIX "Can't use platform_notify\n");
return 0;
}
platform_notify = acpi_platform_notify;
platform_notify_remove = acpi_platform_notify_remove;
return 0;
}
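/*
 * Note (added): the driver core invokes platform_notify() from device_add()
 * and platform_notify_remove() from device_del(), so once the hooks above
 * are installed every device added to the system passes through
 * acpi_platform_notify() and gets a chance to be bound to an ACPI handle.
 */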
| gpl-2.0 |
yi9/linux | fs/jfs/jfs_imap.c | 2060 | 86132 | /*
* Copyright (C) International Business Machines Corp., 2000-2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* jfs_imap.c: inode allocation map manager
*
* Serialization:
* Each AG has a simple lock which is used to control the serialization of
* the AG level lists. This lock should be taken first whenever an AG
* level list will be modified or accessed.
*
* Each IAG is locked by obtaining the buffer for the IAG page.
*
* There is also an inode lock for the inode map inode. A read lock needs to
* be taken whenever an IAG is read from the map or the global level
* information is read. A write lock needs to be taken whenever the global
* level information is modified or an atomic operation needs to be used.
*
* If more than one IAG is read at one time, the read lock may not
* be given up until all of the IAG's are read. Otherwise, a deadlock
* may occur when trying to obtain the read lock while another thread
* holding the read lock is waiting on the IAG already being held.
*
* The control page of the inode map is read into memory by diMount().
* Thereafter it should only be modified in memory and then it will be
* written out when the filesystem is unmounted by diUnmount().
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_dinode.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
* imap locks
*/
/* iag free list lock */
#define IAGFREE_LOCK_INIT(imap) mutex_init(&imap->im_freelock)
#define IAGFREE_LOCK(imap) mutex_lock(&imap->im_freelock)
#define IAGFREE_UNLOCK(imap) mutex_unlock(&imap->im_freelock)
/* per ag iag list locks */
#define AG_LOCK_INIT(imap,index) mutex_init(&(imap->im_aglock[index]))
#define AG_LOCK(imap,agno) mutex_lock(&imap->im_aglock[agno])
#define AG_UNLOCK(imap,agno) mutex_unlock(&imap->im_aglock[agno])
/*
* forward references
*/
static int diAllocAG(struct inomap *, int, bool, struct inode *);
static int diAllocAny(struct inomap *, int, bool, struct inode *);
static int diAllocBit(struct inomap *, struct iag *, int);
static int diAllocExt(struct inomap *, int, struct inode *);
static int diAllocIno(struct inomap *, int, struct inode *);
static int diFindFree(u32, int);
static int diNewExt(struct inomap *, struct iag *, int);
static int diNewIAG(struct inomap *, int *, int, struct metapage **);
static void duplicateIXtree(struct super_block *, s64, int, s64 *);
static int diIAGRead(struct inomap * imap, int, struct metapage **);
static int copy_from_dinode(struct dinode *, struct inode *);
static void copy_to_dinode(struct dinode *, struct inode *);
/*
* NAME: diMount()
*
* FUNCTION: initialize the incore inode map control structures for
* a fileset or aggregate init time.
*
* the inode map's control structure (dinomap) is
* brought in from disk and placed in virtual memory.
*
* PARAMETERS:
* ipimap - pointer to inode map inode for the aggregate or fileset.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient free virtual memory.
* -EIO - i/o error.
*/
int diMount(struct inode *ipimap)
{
struct inomap *imap;
struct metapage *mp;
int index;
struct dinomap_disk *dinom_le;
/*
* allocate/initialize the in-memory inode map control structure
*/
/* allocate the in-memory inode map control structure. */
imap = kmalloc(sizeof(struct inomap), GFP_KERNEL);
if (imap == NULL) {
jfs_err("diMount: kmalloc returned NULL!");
return -ENOMEM;
}
/* read the on-disk inode map control structure. */
mp = read_metapage(ipimap,
IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
kfree(imap);
return -EIO;
}
/* copy the on-disk version to the in-memory version. */
dinom_le = (struct dinomap_disk *) mp->data;
imap->im_freeiag = le32_to_cpu(dinom_le->in_freeiag);
imap->im_nextiag = le32_to_cpu(dinom_le->in_nextiag);
atomic_set(&imap->im_numinos, le32_to_cpu(dinom_le->in_numinos));
atomic_set(&imap->im_numfree, le32_to_cpu(dinom_le->in_numfree));
imap->im_nbperiext = le32_to_cpu(dinom_le->in_nbperiext);
imap->im_l2nbperiext = le32_to_cpu(dinom_le->in_l2nbperiext);
for (index = 0; index < MAXAG; index++) {
imap->im_agctl[index].inofree =
le32_to_cpu(dinom_le->in_agctl[index].inofree);
imap->im_agctl[index].extfree =
le32_to_cpu(dinom_le->in_agctl[index].extfree);
imap->im_agctl[index].numinos =
le32_to_cpu(dinom_le->in_agctl[index].numinos);
imap->im_agctl[index].numfree =
le32_to_cpu(dinom_le->in_agctl[index].numfree);
}
/* release the buffer. */
release_metapage(mp);
/*
* allocate/initialize inode allocation map locks
*/
/* allocate and init iag free list lock */
IAGFREE_LOCK_INIT(imap);
/* allocate and init ag list locks */
for (index = 0; index < MAXAG; index++) {
AG_LOCK_INIT(imap, index);
}
/* bind the inode map inode and inode map control structure
* to each other.
*/
imap->im_ipimap = ipimap;
JFS_IP(ipimap)->i_imap = imap;
return (0);
}
/*
* NAME: diUnmount()
*
* FUNCTION: write to disk the incore inode map control structures for
* a fileset or aggregate at unmount time.
*
* PARAMETERS:
* ipimap - pointer to inode map inode for the aggregate or fileset.
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient free virtual memory.
* -EIO - i/o error.
*/
int diUnmount(struct inode *ipimap, int mounterror)
{
struct inomap *imap = JFS_IP(ipimap)->i_imap;
/*
* update the on-disk inode map control structure
*/
if (!(mounterror || isReadOnly(ipimap)))
diSync(ipimap);
/*
* Invalidate the page cache buffers
*/
truncate_inode_pages(ipimap->i_mapping, 0);
/*
* free in-memory control structure
*/
kfree(imap);
return (0);
}
/*
* diSync()
*/
int diSync(struct inode *ipimap)
{
struct dinomap_disk *dinom_le;
struct inomap *imp = JFS_IP(ipimap)->i_imap;
struct metapage *mp;
int index;
/*
* write imap global control page
*/
/* read the on-disk inode map control structure */
mp = get_metapage(ipimap,
IMAPBLKNO << JFS_SBI(ipimap->i_sb)->l2nbperpage,
PSIZE, 0);
if (mp == NULL) {
jfs_err("diSync: get_metapage failed!");
return -EIO;
}
/* copy the in-memory version to the on-disk version */
dinom_le = (struct dinomap_disk *) mp->data;
dinom_le->in_freeiag = cpu_to_le32(imp->im_freeiag);
dinom_le->in_nextiag = cpu_to_le32(imp->im_nextiag);
dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos));
dinom_le->in_numfree = cpu_to_le32(atomic_read(&imp->im_numfree));
dinom_le->in_nbperiext = cpu_to_le32(imp->im_nbperiext);
dinom_le->in_l2nbperiext = cpu_to_le32(imp->im_l2nbperiext);
for (index = 0; index < MAXAG; index++) {
dinom_le->in_agctl[index].inofree =
cpu_to_le32(imp->im_agctl[index].inofree);
dinom_le->in_agctl[index].extfree =
cpu_to_le32(imp->im_agctl[index].extfree);
dinom_le->in_agctl[index].numinos =
cpu_to_le32(imp->im_agctl[index].numinos);
dinom_le->in_agctl[index].numfree =
cpu_to_le32(imp->im_agctl[index].numfree);
}
/* write out the control structure */
write_metapage(mp);
/*
* write out dirty pages of imap
*/
filemap_write_and_wait(ipimap->i_mapping);
diWriteSpecial(ipimap, 0);
return (0);
}
/*
* NAME: diRead()
*
* FUNCTION: initialize an incore inode from disk.
*
* on entry, the specified incore inode should itself
* specify the disk inode number corresponding to the
* incore inode (i.e. i_number should be initialized).
*
* this routine handles incore inode initialization for
* both "special" and "regular" inodes. special inodes
* are those required early in the mount process and
* require special handling since much of the file system
* is not yet initialized. these "special" inodes are
* identified by a NULL inode map inode pointer and are
* actually initialized by a call to diReadSpecial().
*
* for regular inodes, the iag describing the disk inode
* is read from disk to determine the inode extent address
* for the disk inode. with the inode extent address in
* hand, the page of the extent that contains the disk
* inode is read and the disk inode is copied to the
* incore inode.
*
* PARAMETERS:
* ip - pointer to incore inode to be initialized from disk.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
* -ENOMEM - insufficient memory
*
*/
int diRead(struct inode *ip)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
int iagno, ino, extno, rc;
struct inode *ipimap;
struct dinode *dp;
struct iag *iagp;
struct metapage *mp;
s64 blkno, agstart;
struct inomap *imap;
int block_offset;
int inodes_left;
unsigned long pageno;
int rel_inode;
jfs_info("diRead: ino = %ld", ip->i_ino);
ipimap = sbi->ipimap;
JFS_IP(ip)->ipimap = ipimap;
/* determine the iag number for this inode (number) */
iagno = INOTOIAG(ip->i_ino);
/* read the iag */
imap = JFS_IP(ipimap)->i_imap;
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
rc = diIAGRead(imap, iagno, &mp);
IREAD_UNLOCK(ipimap);
if (rc) {
jfs_err("diRead: diIAGRead returned %d", rc);
return (rc);
}
iagp = (struct iag *) mp->data;
/* determine inode extent that holds the disk inode */
ino = ip->i_ino & (INOSPERIAG - 1);
extno = ino >> L2INOSPEREXT;
if ((lengthPXD(&iagp->inoext[extno]) != imap->im_nbperiext) ||
(addressPXD(&iagp->inoext[extno]) == 0)) {
release_metapage(mp);
return -ESTALE;
}
/* get disk block number of the page within the inode extent
* that holds the disk inode.
*/
blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage);
/* get the ag for the iag */
agstart = le64_to_cpu(iagp->agstart);
release_metapage(mp);
rel_inode = (ino & (INOSPERPAGE - 1));
pageno = blkno >> sbi->l2nbperpage;
if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
/*
* OS/2 didn't always align inode extents on page boundaries
*/
inodes_left =
(sbi->nbperpage - block_offset) << sbi->l2niperblk;
if (rel_inode < inodes_left)
rel_inode += block_offset << sbi->l2niperblk;
else {
pageno += 1;
rel_inode -= inodes_left;
}
}
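/* Worked example (added, assuming an OS/2-created volume with 1 KiB blocks:
 * nbperpage = 4 and l2niperblk = 1, i.e. two 512-byte inodes per block):
 * if blkno lands 2 blocks into its 4 KiB page, block_offset = 2 and
 * inodes_left = (4 - 2) << 1 = 4. A rel_inode of 1 stays on this page and
 * becomes 1 + (2 << 1) = 5, while a rel_inode of 5 spills over: pageno is
 * incremented and rel_inode becomes 5 - 4 = 1.
 */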
/* read the page of disk inode */
mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
if (!mp) {
jfs_err("diRead: read_metapage failed");
return -EIO;
}
/* locate the disk inode requested */
dp = (struct dinode *) mp->data;
dp += rel_inode;
if (ip->i_ino != le32_to_cpu(dp->di_number)) {
jfs_error(ip->i_sb, "i_ino != di_number\n");
rc = -EIO;
} else if (le32_to_cpu(dp->di_nlink) == 0)
rc = -ESTALE;
else
/* copy the disk inode to the in-memory inode */
rc = copy_from_dinode(dp, ip);
release_metapage(mp);
/* set the ag for the inode */
JFS_IP(ip)->agstart = agstart;
JFS_IP(ip)->active_ag = -1;
return (rc);
}
/*
* NAME: diReadSpecial()
*
* FUNCTION: initialize a 'special' inode from disk.
*
* this routine handles aggregate level inodes. The
* inode cache cannot differentiate between the
* aggregate inodes and the filesystem inodes, so we
* handle these here. We don't actually use the aggregate
* inode map, since these inodes are at a fixed location
* and in some cases the aggregate inode map isn't initialized
* yet.
*
* PARAMETERS:
* sb - filesystem superblock
* inum - aggregate inode number
* secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES:
* new inode - success
* NULL - i/o error.
*/
struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
uint address;
struct dinode *dp;
struct inode *ip;
struct metapage *mp;
ip = new_inode(sb);
if (ip == NULL) {
jfs_err("diReadSpecial: new_inode returned NULL!");
return ip;
}
if (secondary) {
address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
JFS_IP(ip)->ipimap = sbi->ipaimap2;
} else {
address = AITBL_OFF >> L2PSIZE;
JFS_IP(ip)->ipimap = sbi->ipaimap;
}
ASSERT(inum < INOSPEREXT);
ip->i_ino = inum;
address += inum >> 3; /* 8 inodes per 4K page */
/* read the page of fixed disk inode (AIT) in raw mode */
mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
if (mp == NULL) {
set_nlink(ip, 1); /* Don't want iput() deleting it */
iput(ip);
return (NULL);
}
/* get the pointer to the disk inode of interest */
dp = (struct dinode *) (mp->data);
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
if ((copy_from_dinode(dp, ip)) != 0) {
/* handle bad return by returning NULL for ip */
set_nlink(ip, 1); /* Don't want iput() deleting it */
iput(ip);
/* release the page */
release_metapage(mp);
return (NULL);
}
ip->i_mapping->a_ops = &jfs_metapage_aops;
mapping_set_gfp_mask(ip->i_mapping, GFP_NOFS);
/* Allocations to metadata inodes should not affect quotas */
ip->i_flags |= S_NOQUOTA;
if ((inum == FILESYSTEM_I) && (JFS_IP(ip)->ipimap == sbi->ipaimap)) {
sbi->gengen = le32_to_cpu(dp->di_gengen);
sbi->inostamp = le32_to_cpu(dp->di_inostamp);
}
/* release the page */
release_metapage(mp);
/*
* __mark_inode_dirty expects inodes to be hashed. Since we don't
* want special inodes in the fileset inode space, we make them
* appear hashed, but do not put on any lists. hlist_del()
* will work fine and require no locking.
*/
hlist_add_fake(&ip->i_hash);
return (ip);
}
/*
* NAME: diWriteSpecial()
*
* FUNCTION: Write the special inode to disk
*
* PARAMETERS:
* ip - special inode
* secondary - 1 if secondary aggregate inode table
*
* RETURN VALUES: none
*/
void diWriteSpecial(struct inode *ip, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
uint address;
struct dinode *dp;
ino_t inum = ip->i_ino;
struct metapage *mp;
if (secondary)
address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage;
else
address = AITBL_OFF >> L2PSIZE;
ASSERT(inum < INOSPEREXT);
address += inum >> 3; /* 8 inodes per 4K page */
/* read the page of fixed disk inode (AIT) in raw mode */
mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
if (mp == NULL) {
jfs_err("diWriteSpecial: failed to read aggregate inode "
"extent!");
return;
}
/* get the pointer to the disk inode of interest */
dp = (struct dinode *) (mp->data);
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
copy_to_dinode(dp, ip);
memcpy(&dp->di_xtroot, &JFS_IP(ip)->i_xtroot, 288);
if (inum == FILESYSTEM_I)
dp->di_gengen = cpu_to_le32(sbi->gengen);
/* write the page */
write_metapage(mp);
}
/*
* NAME: diFreeSpecial()
*
* FUNCTION: Free allocated space for special inode
*/
void diFreeSpecial(struct inode *ip)
{
if (ip == NULL) {
jfs_err("diFreeSpecial called with NULL ip!");
return;
}
filemap_write_and_wait(ip->i_mapping);
truncate_inode_pages(ip->i_mapping, 0);
iput(ip);
}
/*
* NAME: diWrite()
*
* FUNCTION: write the on-disk inode portion of the in-memory inode
* to its corresponding on-disk inode.
*
* on entry, the specified incore inode should itself
* specify the disk inode number corresponding to the
* incore inode (i.e. i_number should be initialized).
*
* the inode contains the inode extent address for the disk
* inode. with the inode extent address in hand, the
* page of the extent that contains the disk inode is
* read and the disk inode portion of the incore inode
* is copied to the disk inode.
*
* PARAMETERS:
* tid - transaction id
* ip - pointer to incore inode to be written to the inode extent.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
*/
int diWrite(tid_t tid, struct inode *ip)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int rc = 0;
s32 ino;
struct dinode *dp;
s64 blkno;
int block_offset;
int inodes_left;
struct metapage *mp;
unsigned long pageno;
int rel_inode;
int dioffset;
struct inode *ipimap;
uint type;
lid_t lid;
struct tlock *ditlck, *tlck;
struct linelock *dilinelock, *ilinelock;
struct lv *lv;
int n;
ipimap = jfs_ip->ipimap;
ino = ip->i_ino & (INOSPERIAG - 1);
if (!addressPXD(&(jfs_ip->ixpxd)) ||
(lengthPXD(&(jfs_ip->ixpxd)) !=
JFS_IP(ipimap)->i_imap->im_nbperiext)) {
jfs_error(ip->i_sb, "ixpxd invalid\n");
return -EIO;
}
/*
* read the page of disk inode containing the specified inode:
*/
/* compute the block address of the page */
blkno = INOPBLK(&(jfs_ip->ixpxd), ino, sbi->l2nbperpage);
rel_inode = (ino & (INOSPERPAGE - 1));
pageno = blkno >> sbi->l2nbperpage;
if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) {
/*
* OS/2 didn't always align inode extents on page boundaries
*/
inodes_left =
(sbi->nbperpage - block_offset) << sbi->l2niperblk;
if (rel_inode < inodes_left)
rel_inode += block_offset << sbi->l2niperblk;
else {
pageno += 1;
rel_inode -= inodes_left;
}
}
/* read the page of disk inode */
retry:
mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1);
if (!mp)
return -EIO;
/* get the pointer to the disk inode */
dp = (struct dinode *) mp->data;
dp += rel_inode;
dioffset = (ino & (INOSPERPAGE - 1)) << L2DISIZE;
/*
* acquire transaction lock on the on-disk inode;
* N.B. tlock is acquired on ipimap not ip;
*/
if ((ditlck =
txLock(tid, ipimap, mp, tlckINODE | tlckENTRY)) == NULL)
goto retry;
dilinelock = (struct linelock *) & ditlck->lock;
/*
* copy btree root from in-memory inode to on-disk inode
*
* (tlock is taken from inline B+-tree root in in-memory
* inode when the B+-tree root is updated, which is pointed
* by jfs_ip->blid as well as being on tx tlock list)
*
* further processing of btree root is based on the copy
* in in-memory inode, where txLog() will log from, and,
* for xtree root, txUpdateMap() will update map and reset
* XAD_NEW bit;
*/
if (S_ISDIR(ip->i_mode) && (lid = jfs_ip->xtlid)) {
/*
* This is the special xtree inside the directory for storing
* the directory table
*/
xtpage_t *p, *xp;
xad_t *xad;
jfs_ip->xtlid = 0;
tlck = lid_to_tlock(lid);
assert(tlck->type & tlckXTREE);
tlck->type |= tlckBTROOT;
tlck->mp = mp;
ilinelock = (struct linelock *) & tlck->lock;
/*
* copy xtree root from inode to dinode:
*/
p = &jfs_ip->i_xtroot;
xp = (xtpage_t *) &dp->di_dirtable;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
lv->length << L2XTSLOTSIZE);
}
/* reset on-disk (metadata page) xtree XAD_NEW bit */
xad = &xp->xad[XTENTRYSTART];
for (n = XTENTRYSTART;
n < le16_to_cpu(xp->header.nextindex); n++, xad++)
if (xad->flag & (XAD_NEW | XAD_EXTENDED))
xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
}
if ((lid = jfs_ip->blid) == 0)
goto inlineData;
jfs_ip->blid = 0;
tlck = lid_to_tlock(lid);
type = tlck->type;
tlck->type |= tlckBTROOT;
tlck->mp = mp;
ilinelock = (struct linelock *) & tlck->lock;
/*
* regular file: 16 byte (XAD slot) granularity
*/
if (type & tlckXTREE) {
xtpage_t *p, *xp;
xad_t *xad;
/*
* copy xtree root from inode to dinode:
*/
p = &jfs_ip->i_xtroot;
xp = &dp->di_xtroot;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
lv->length << L2XTSLOTSIZE);
}
/* reset on-disk (metadata page) xtree XAD_NEW bit */
xad = &xp->xad[XTENTRYSTART];
for (n = XTENTRYSTART;
n < le16_to_cpu(xp->header.nextindex); n++, xad++)
if (xad->flag & (XAD_NEW | XAD_EXTENDED))
xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
}
/*
* directory: 32 byte (directory entry slot) granularity
*/
else if (type & tlckDTREE) {
dtpage_t *p, *xp;
/*
* copy dtree root from inode to dinode:
*/
p = (dtpage_t *) &jfs_ip->i_dtroot;
xp = (dtpage_t *) & dp->di_dtroot;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
lv->length << L2DTSLOTSIZE);
}
} else {
jfs_err("diWrite: UFO tlock");
}
inlineData:
/*
* copy inline symlink from in-memory inode to on-disk inode
*/
if (S_ISLNK(ip->i_mode) && ip->i_size < IDATASIZE) {
lv = & dilinelock->lv[dilinelock->index];
lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
lv->length = 2;
memcpy(&dp->di_fastsymlink, jfs_ip->i_inline, IDATASIZE);
dilinelock->index++;
}
/*
* copy inline data from in-memory inode to on-disk inode:
* 128 byte slot granularity
*/
if (test_cflag(COMMIT_Inlineea, ip)) {
lv = & dilinelock->lv[dilinelock->index];
lv->offset = (dioffset + 3 * 128) >> L2INODESLOTSIZE;
lv->length = 1;
memcpy(&dp->di_inlineea, jfs_ip->i_inline_ea, INODESLOTSIZE);
dilinelock->index++;
clear_cflag(COMMIT_Inlineea, ip);
}
/*
* lock/copy inode base: 128 byte slot granularity
*/
lv = & dilinelock->lv[dilinelock->index];
lv->offset = dioffset >> L2INODESLOTSIZE;
copy_to_dinode(dp, ip);
if (test_and_clear_cflag(COMMIT_Dirtable, ip)) {
lv->length = 2;
memcpy(&dp->di_dirtable, &jfs_ip->i_dirtable, 96);
} else
lv->length = 1;
dilinelock->index++;
/* release the buffer holding the updated on-disk inode.
* the buffer will be later written by commit processing.
*/
write_metapage(mp);
return (rc);
}
/*
* NAME: diFree(ip)
*
* FUNCTION: free a specified inode from the inode working map
* for a fileset or aggregate.
*
* if the inode to be freed represents the first (only)
* free inode within the iag, the iag will be placed on
* the ag free inode list.
*
* freeing the inode will cause the inode extent to be
* freed if the inode is the only allocated inode within
* the extent. in this case all the disk resource backing
* up the inode extent will be freed. in addition, the iag
* will be placed on the ag extent free list if the extent
* is the first free extent in the iag. if freeing the
* extent also means that no free inodes will exist for
* the iag, the iag will also be removed from the ag free
* inode list.
*
* the iag describing the inode will be freed if the extent
* is to be freed and it is the only backed extent within
* the iag. in this case, the iag will be removed from the
* ag free extent list and ag free inode list and placed on
* the inode map's free iag list.
*
* a careful update approach is used to provide consistency
* in the face of updates to multiple buffers. under this
* approach, all required buffers are obtained before making
* any updates and are held until all updates are complete.
*
* PARAMETERS:
* ip - inode to be freed.
*
* RETURN VALUES:
* 0 - success
* -EIO - i/o error.
*/
int diFree(struct inode *ip)
{
int rc;
ino_t inum = ip->i_ino;
struct iag *iagp, *aiagp, *biagp, *ciagp, *diagp;
struct metapage *mp, *amp, *bmp, *cmp, *dmp;
int iagno, ino, extno, bitno, sword, agno;
int back, fwd;
u32 bitmap, mask;
struct inode *ipimap = JFS_SBI(ip->i_sb)->ipimap;
struct inomap *imap = JFS_IP(ipimap)->i_imap;
pxd_t freepxd;
tid_t tid;
struct inode *iplist[3];
struct tlock *tlck;
struct pxd_lock *pxdlock;
/*
* This is just to suppress compiler warnings. The same logic that
* references these variables is used to initialize them.
*/
aiagp = biagp = ciagp = diagp = NULL;
/* get the iag number containing the inode.
*/
iagno = INOTOIAG(inum);
/* make sure that the iag is contained within
* the map.
*/
if (iagno >= imap->im_nextiag) {
print_hex_dump(KERN_ERR, "imap: ", DUMP_PREFIX_ADDRESS, 16, 4,
imap, 32, 0);
jfs_error(ip->i_sb, "inum = %d, iagno = %d, nextiag = %d\n",
(uint) inum, iagno, imap->im_nextiag);
return -EIO;
}
/* get the allocation group for this ino.
*/
agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
/* Lock the AG specific inode map information
*/
AG_LOCK(imap, agno);
/* Obtain read lock in imap inode. Don't release it until we have
* read all of the IAG's that we are going to.
*/
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
/* read the iag.
*/
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
return (rc);
}
iagp = (struct iag *) mp->data;
/* get the inode number and extent number of the inode within
* the iag and the inode number within the extent.
*/
ino = inum & (INOSPERIAG - 1);
extno = ino >> L2INOSPEREXT;
bitno = ino & (INOSPEREXT - 1);
mask = HIGHORDER >> bitno;
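/* Note (added): HIGHORDER (jfs_imap.h) is the most-significant bit of a map
 * word, so inode 0 of an extent corresponds to bit 31 of wmap[extno]; a set
 * bit means the inode is allocated in the working map, and clearing it
 * below is what frees the inode.
 */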
if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
jfs_error(ip->i_sb, "wmap shows inode already free\n");
}
if (!addressPXD(&iagp->inoext[extno])) {
release_metapage(mp);
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
jfs_error(ip->i_sb, "invalid inoext\n");
return -EIO;
}
/* compute the bitmap for the extent reflecting the freed inode.
*/
bitmap = le32_to_cpu(iagp->wmap[extno]) & ~mask;
if (imap->im_agctl[agno].numfree > imap->im_agctl[agno].numinos) {
release_metapage(mp);
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
jfs_error(ip->i_sb, "numfree > numinos\n");
return -EIO;
}
/*
* inode extent still has some inodes or below low water mark:
* keep the inode extent;
*/
if (bitmap ||
imap->im_agctl[agno].numfree < 96 ||
(imap->im_agctl[agno].numfree < 288 &&
(((imap->im_agctl[agno].numfree * 100) /
imap->im_agctl[agno].numinos) <= 25))) {
/* if the iag currently has no free inodes (i.e.,
* the inode being freed is the first free inode of iag),
* insert the iag at head of the inode free list for the ag.
*/
if (iagp->nfreeinos == 0) {
/* check if there are any iags on the ag inode
* free list. if so, read the first one so that
* we can link the current iag onto the list at
* the head.
*/
if ((fwd = imap->im_agctl[agno].inofree) >= 0) {
/* read the iag that currently is the head
* of the list.
*/
if ((rc = diIAGRead(imap, fwd, &amp))) {
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
release_metapage(mp);
return (rc);
}
aiagp = (struct iag *) amp->data;
/* make current head point back to the iag.
*/
aiagp->inofreeback = cpu_to_le32(iagno);
write_metapage(amp);
}
/* iag points forward to current head and iag
* becomes the new head of the list.
*/
iagp->inofreefwd =
cpu_to_le32(imap->im_agctl[agno].inofree);
iagp->inofreeback = cpu_to_le32(-1);
imap->im_agctl[agno].inofree = iagno;
}
IREAD_UNLOCK(ipimap);
/* update the free inode summary map for the extent if
* freeing the inode means the extent will now have free
* inodes (i.e., the inode being freed is the first free
* inode of extent),
*/
if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
sword = extno >> L2EXTSPERSUM;
bitno = extno & (EXTSPERSUM - 1);
iagp->inosmap[sword] &=
cpu_to_le32(~(HIGHORDER >> bitno));
}
/* update the bitmap.
*/
iagp->wmap[extno] = cpu_to_le32(bitmap);
/* update the free inode counts at the iag, ag and
* map level.
*/
le32_add_cpu(&iagp->nfreeinos, 1);
imap->im_agctl[agno].numfree += 1;
atomic_inc(&imap->im_numfree);
/* release the AG inode map lock
*/
AG_UNLOCK(imap, agno);
/* write the iag */
write_metapage(mp);
return (0);
}
/*
* inode extent has become free and above low water mark:
* free the inode extent;
*/
/*
* prepare to update iag list(s) (careful update step 1)
*/
amp = bmp = cmp = dmp = NULL;
fwd = back = -1;
/* check if the iag currently has no free extents. if so,
* it will be placed on the head of the ag extent free list.
*/
if (iagp->nfreeexts == 0) {
/* check if the ag extent free list has any iags.
* if so, read the iag at the head of the list now.
* this (head) iag will be updated later to reflect
* the addition of the current iag at the head of
* the list.
*/
if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (struct iag *) amp->data;
}
} else {
/* iag has free extents. check if the addition of a free
* extent will cause all extents to be free within this
* iag. if so, the iag will be removed from the ag extent
* free list and placed on the inode map's free iag list.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
/* in preparation for removing the iag from the
* ag extent free list, read the iags preceding
* and following the iag on the ag extent free
* list.
*/
if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (struct iag *) amp->data;
}
if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
if ((rc = diIAGRead(imap, back, &bmp)))
goto error_out;
biagp = (struct iag *) bmp->data;
}
}
}
/* remove the iag from the ag inode free list if freeing
* this extent cause the iag to have no free inodes.
*/
if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
int inofreeback = le32_to_cpu(iagp->inofreeback);
int inofreefwd = le32_to_cpu(iagp->inofreefwd);
/* in preparation for removing the iag from the
* ag inode free list, read the iags preceding
* and following the iag on the ag inode free
* list. before reading these iags, we must make
* sure that we already don't have them in hand
* from up above, since re-reading an iag (buffer)
* we are currently holding would cause a deadlock.
*/
if (inofreefwd >= 0) {
if (inofreefwd == fwd)
ciagp = (struct iag *) amp->data;
else if (inofreefwd == back)
ciagp = (struct iag *) bmp->data;
else {
if ((rc =
diIAGRead(imap, inofreefwd, &cmp)))
goto error_out;
ciagp = (struct iag *) cmp->data;
}
assert(ciagp != NULL);
}
if (inofreeback >= 0) {
if (inofreeback == fwd)
diagp = (struct iag *) amp->data;
else if (inofreeback == back)
diagp = (struct iag *) bmp->data;
else {
if ((rc =
diIAGRead(imap, inofreeback, &dmp)))
goto error_out;
diagp = (struct iag *) dmp->data;
}
assert(diagp != NULL);
}
}
IREAD_UNLOCK(ipimap);
/*
* invalidate any page of the inode extent freed from buffer cache;
*/
freepxd = iagp->inoext[extno];
invalidate_pxd_metapages(ip, freepxd);
/*
* update iag list(s) (careful update step 2)
*/
/* add the iag to the ag extent free list if this is the
* first free extent for the iag.
*/
if (iagp->nfreeexts == 0) {
if (fwd >= 0)
aiagp->extfreeback = cpu_to_le32(iagno);
iagp->extfreefwd =
cpu_to_le32(imap->im_agctl[agno].extfree);
iagp->extfreeback = cpu_to_le32(-1);
imap->im_agctl[agno].extfree = iagno;
} else {
/* remove the iag from the ag extent list if all extents
* are now free and place it on the inode map iag free list.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG - 1)) {
if (fwd >= 0)
aiagp->extfreeback = iagp->extfreeback;
if (back >= 0)
biagp->extfreefwd = iagp->extfreefwd;
else
imap->im_agctl[agno].extfree =
le32_to_cpu(iagp->extfreefwd);
iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
IAGFREE_LOCK(imap);
iagp->iagfree = cpu_to_le32(imap->im_freeiag);
imap->im_freeiag = iagno;
IAGFREE_UNLOCK(imap);
}
}
/* remove the iag from the ag inode free list if freeing
* this extent causes the iag to have no free inodes.
*/
if (iagp->nfreeinos == cpu_to_le32(INOSPEREXT - 1)) {
if ((int) le32_to_cpu(iagp->inofreefwd) >= 0)
ciagp->inofreeback = iagp->inofreeback;
if ((int) le32_to_cpu(iagp->inofreeback) >= 0)
diagp->inofreefwd = iagp->inofreefwd;
else
imap->im_agctl[agno].inofree =
le32_to_cpu(iagp->inofreefwd);
iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
}
/* update the inode extent address and working map
* to reflect the free extent.
* the permanent map should have been updated already
* for the inode being freed.
*/
if (iagp->pmap[extno] != 0) {
jfs_error(ip->i_sb, "the pmap does not show inode free\n");
}
iagp->wmap[extno] = 0;
PXDlength(&iagp->inoext[extno], 0);
PXDaddress(&iagp->inoext[extno], 0);
/* update the free extent and free inode summary maps
* to reflect the freed extent.
* the inode summary map is marked to indicate no inodes
* available for the freed extent.
*/
sword = extno >> L2EXTSPERSUM;
bitno = extno & (EXTSPERSUM - 1);
mask = HIGHORDER >> bitno;
iagp->inosmap[sword] |= cpu_to_le32(mask);
iagp->extsmap[sword] &= cpu_to_le32(~mask);
/* update the number of free inodes and number of free extents
* for the iag.
*/
le32_add_cpu(&iagp->nfreeinos, -(INOSPEREXT - 1));
le32_add_cpu(&iagp->nfreeexts, 1);
/* update the number of free inodes and backed inodes
* at the ag and inode map level.
*/
imap->im_agctl[agno].numfree -= (INOSPEREXT - 1);
imap->im_agctl[agno].numinos -= INOSPEREXT;
atomic_sub(INOSPEREXT - 1, &imap->im_numfree);
atomic_sub(INOSPEREXT, &imap->im_numinos);
if (amp)
write_metapage(amp);
if (bmp)
write_metapage(bmp);
if (cmp)
write_metapage(cmp);
if (dmp)
write_metapage(dmp);
/*
* start transaction to update block allocation map
* for the inode extent freed;
*
* N.B. AG_LOCK is released and iag will be released below, and
* other thread may allocate inode from/reusing the ixad freed
* BUT with new/different backing inode extent from the extent
* to be freed by the transaction;
*/
tid = txBegin(ipimap->i_sb, COMMIT_FORCE);
mutex_lock(&JFS_IP(ipimap)->commit_mutex);
/* acquire tlock of the iag page of the freed ixad
* to force the page NOHOMEOK (even though no data is
* logged from the iag page) until NOREDOPAGE|FREEXTENT log
* for the free of the extent is committed;
* write FREEXTENT|NOREDOPAGE log record
* N.B. linelock is overlaid as freed extent descriptor;
*/
tlck = txLock(tid, ipimap, mp, tlckINODE | tlckFREE);
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = freepxd;
pxdlock->index = 1;
write_metapage(mp);
iplist[0] = ipimap;
/*
* logredo needs the IAG number and IAG extent index in order
* to ensure that the IMap is consistent. The least disruptive
* way to pass these values through to the transaction manager
* is in the iplist array.
*
* It's not pretty, but it works.
*/
iplist[1] = (struct inode *) (size_t)iagno;
iplist[2] = (struct inode *) (size_t)extno;
rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
/* unlock the AG inode map information */
AG_UNLOCK(imap, agno);
return (0);
error_out:
IREAD_UNLOCK(ipimap);
if (amp)
release_metapage(amp);
if (bmp)
release_metapage(bmp);
if (cmp)
release_metapage(cmp);
if (dmp)
release_metapage(dmp);
AG_UNLOCK(imap, agno);
release_metapage(mp);
return (rc);
}
/*
* There are several places in the diAlloc* routines where we initialize
* the inode.
*/
static inline void
diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
ip->i_ino = (iagno << L2INOSPERIAG) + ino;
jfs_ip->ixpxd = iagp->inoext[extno];
jfs_ip->agstart = le64_to_cpu(iagp->agstart);
jfs_ip->active_ag = -1;
}
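/* Note (added): this is the inverse of the lookups above; the on-disk inode
 * number packs the IAG index in the high bits and the slot within the IAG
 * in the low L2INOSPERIAG bits, which INOTOIAG() and the
 * "& (INOSPERIAG - 1)" masks in diRead()/diFree() take back apart.
 */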
/*
* NAME: diAlloc(pip,dir,ip)
*
* FUNCTION: allocate a disk inode from the inode working map
* for a fileset or aggregate.
*
* PARAMETERS:
* pip - pointer to incore inode for the parent inode.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
int diAlloc(struct inode *pip, bool dir, struct inode *ip)
{
int rc, ino, iagno, addext, extno, bitno, sword;
int nwords, rem, i, agno;
u32 mask, inosmap, extsmap;
struct inode *ipimap;
struct metapage *mp;
ino_t inum;
struct iag *iagp;
struct inomap *imap;
/* get the pointers to the inode map inode and the
* corresponding imap control structure.
*/
ipimap = JFS_SBI(pip->i_sb)->ipimap;
imap = JFS_IP(ipimap)->i_imap;
JFS_IP(ip)->ipimap = ipimap;
JFS_IP(ip)->fileset = FILESYSTEM_I;
/* for a directory, the allocation policy is to start
* at the ag level using the preferred ag.
*/
if (dir) {
agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
AG_LOCK(imap, agno);
goto tryag;
}
/* for files, the policy starts off by trying to allocate from
* the same iag containing the parent disk inode:
* try to allocate the new disk inode close to the parent disk
* inode, using parent disk inode number + 1 as the allocation
* hint. (we use a left-to-right policy to attempt to avoid
* moving backward on the disk.) compute the hint within the
* file system and the iag.
*/
/* get the ag number of this iag */
agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
/*
* There is an open file actively growing. We want to
* allocate new inodes from a different ag to avoid
* fragmentation problems.
*/
agno = dbNextAG(JFS_SBI(pip->i_sb)->ipbmap);
AG_LOCK(imap, agno);
goto tryag;
}
inum = pip->i_ino + 1;
ino = inum & (INOSPERIAG - 1);
/* back off the hint if it is outside of the iag */
if (ino == 0)
inum = pip->i_ino;
/* lock the AG inode map information */
AG_LOCK(imap, agno);
/* Get read lock on imap inode */
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
/* get the iag number and read the iag */
iagno = INOTOIAG(inum);
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
return (rc);
}
iagp = (struct iag *) mp->data;
/* determine if new inode extent is allowed to be added to the iag.
* new inode extent can be added to the iag if the ag
* has less than 32 free disk inodes and the iag has free extents.
*/
addext = (imap->im_agctl[agno].numfree < 32 && iagp->nfreeexts);
/*
* try to allocate from the IAG
*/
/* check if the inode may be allocated from the iag
* (i.e. the inode has free inodes or new extent can be added).
*/
if (iagp->nfreeinos || addext) {
/* determine the extent number of the hint.
*/
extno = ino >> L2INOSPEREXT;
/* check if the extent containing the hint has backed
* inodes. if so, try to allocate within this extent.
*/
if (addressPXD(&iagp->inoext[extno])) {
bitno = ino & (INOSPEREXT - 1);
if ((bitno =
diFindFree(le32_to_cpu(iagp->wmap[extno]),
bitno))
< INOSPEREXT) {
ino = (extno << L2INOSPEREXT) + bitno;
/* a free inode (bit) was found within this
* extent, so allocate it.
*/
rc = diAllocBit(imap, iagp, ino);
IREAD_UNLOCK(ipimap);
if (rc) {
assert(rc == -EIO);
} else {
/* set the results of the allocation
* and write the iag.
*/
diInitInode(ip, iagno, ino, extno,
iagp);
mark_metapage_dirty(mp);
}
release_metapage(mp);
/* free the AG lock and return.
*/
AG_UNLOCK(imap, agno);
return (rc);
}
if (!addext)
extno =
(extno ==
EXTSPERIAG - 1) ? 0 : extno + 1;
}
/*
* no free inodes within the extent containing the hint.
*
* try to allocate from the backed extents following
* hint or, if appropriate (i.e. addext is true), allocate
* an extent of free inodes at or following the extent
* containing the hint.
*
* the free inode and free extent summary maps are used
* here, so determine the starting summary map position
* and the number of words we'll have to examine. again,
* the approach is to allocate following the hint, so we
* might have to initially ignore prior bits of the summary
* map that represent extents prior to the extent containing
* the hint and later revisit these bits.
*/
bitno = extno & (EXTSPERSUM - 1);
nwords = (bitno == 0) ? SMAPSZ : SMAPSZ + 1;
sword = extno >> L2EXTSPERSUM;
/* mask any prior bits for the starting words of the
* summary map.
*/
mask = (bitno == 0) ? 0 : (ONES << (EXTSPERSUM - bitno));
inosmap = le32_to_cpu(iagp->inosmap[sword]) | mask;
extsmap = le32_to_cpu(iagp->extsmap[sword]) | mask;
/* scan the free inode and free extent summary maps for
* free resources.
*/
for (i = 0; i < nwords; i++) {
/* check if this word of the free inode summary
* map describes an extent with free inodes.
*/
if (~inosmap) {
/* an extent with free inodes has been
* found. determine the extent number
* and the inode number within the extent.
*/
rem = diFindFree(inosmap, 0);
extno = (sword << L2EXTSPERSUM) + rem;
rem = diFindFree(le32_to_cpu(iagp->wmap[extno]),
0);
if (rem >= INOSPEREXT) {
IREAD_UNLOCK(ipimap);
release_metapage(mp);
AG_UNLOCK(imap, agno);
jfs_error(ip->i_sb,
"can't find free bit in wmap\n");
return -EIO;
}
/* determine the inode number within the
* iag and allocate the inode from the
* map.
*/
ino = (extno << L2INOSPEREXT) + rem;
rc = diAllocBit(imap, iagp, ino);
IREAD_UNLOCK(ipimap);
if (rc)
assert(rc == -EIO);
else {
/* set the results of the allocation
* and write the iag.
*/
diInitInode(ip, iagno, ino, extno,
iagp);
mark_metapage_dirty(mp);
}
release_metapage(mp);
/* free the AG lock and return.
*/
AG_UNLOCK(imap, agno);
return (rc);
}
/* check if we may allocate an extent of free
* inodes and whether this word of the free
* extents summary map describes a free extent.
*/
if (addext && ~extsmap) {
/* a free extent has been found. determine
* the extent number.
*/
rem = diFindFree(extsmap, 0);
extno = (sword << L2EXTSPERSUM) + rem;
/* allocate an extent of free inodes.
*/
if ((rc = diNewExt(imap, iagp, extno))) {
/* if there is no disk space for a
* new extent, try to allocate the
* disk inode from somewhere else.
*/
if (rc == -ENOSPC)
break;
assert(rc == -EIO);
} else {
/* set the results of the allocation
* and write the iag.
*/
diInitInode(ip, iagno,
extno << L2INOSPEREXT,
extno, iagp);
mark_metapage_dirty(mp);
}
release_metapage(mp);
/* free the imap inode & the AG lock & return.
*/
IREAD_UNLOCK(ipimap);
AG_UNLOCK(imap, agno);
return (rc);
}
/* move on to the next set of summary map words.
*/
sword = (sword == SMAPSZ - 1) ? 0 : sword + 1;
inosmap = le32_to_cpu(iagp->inosmap[sword]);
extsmap = le32_to_cpu(iagp->extsmap[sword]);
}
}
/* unlock imap inode */
IREAD_UNLOCK(ipimap);
/* nothing doing in this iag, so release it. */
release_metapage(mp);
tryag:
/*
* try to allocate anywhere within the same AG as the parent inode.
*/
rc = diAllocAG(imap, agno, dir, ip);
AG_UNLOCK(imap, agno);
if (rc != -ENOSPC)
return (rc);
/*
* try to allocate in any AG.
*/
return (diAllocAny(imap, agno, dir, ip));
}
/*
* NAME: diAllocAG(imap,agno,dir,ip)
*
* FUNCTION: allocate a disk inode from the allocation group.
*
* this routine first determines if a new extent of free
* inodes should be added for the allocation group, with
* the current request satisfied from this extent. if this
* is the case, an attempt will be made to do just that. if
* this attempt fails or it has been determined that a new
* extent should not be added, an attempt is made to satisfy
* the request by allocating an existing (backed) free inode
* from the allocation group.
*
* PRE CONDITION: Already have the AG lock for this AG.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group to allocate from.
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to the new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int
diAllocAG(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
int rc, addext, numfree, numinos;
/* get the number of free and the number of backed disk
* inodes currently within the ag.
*/
numfree = imap->im_agctl[agno].numfree;
numinos = imap->im_agctl[agno].numinos;
if (numfree > numinos) {
jfs_error(ip->i_sb, "numfree > numinos\n");
return -EIO;
}
/* determine if we should allocate a new extent of free inodes
* within the ag: for directory inodes, add a new extent
* if there are a small number of free inodes or number of free
* inodes is a small percentage of the number of backed inodes.
*/
if (dir)
addext = (numfree < 64 ||
(numfree < 256
&& ((numfree * 100) / numinos) <= 20));
else
addext = (numfree == 0);
/*
* try to allocate a new extent of free inodes.
*/
if (addext) {
/* if free space is not available for this new extent, try
* below to allocate a free and existing (already backed)
* inode from the ag.
*/
if ((rc = diAllocExt(imap, agno, ip)) != -ENOSPC)
return (rc);
}
/*
* try to allocate an existing free inode from the ag.
*/
return (diAllocIno(imap, agno, ip));
}
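/* Rough worked example of the addext heuristic above (the counts are
 * illustrative, the thresholds are the ones used in the code): with
 * numfree = 200 free and numinos = 2048 backed inodes, a directory
 * allocation computes (200 * 100) / 2048 = 9, which is <= 20 with
 * numfree < 256, so addext is true and diAllocExt() is tried before
 * falling back to diAllocIno(); a regular-file allocation with the
 * same counts leaves addext false (numfree != 0) and goes straight
 * to diAllocIno().
 */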
/*
 * NAME:	diAllocAny(imap,agno,dir,ip)
*
* FUNCTION: allocate a disk inode from any other allocation group.
*
* this routine is called when an allocation attempt within
 * the primary allocation group has failed. it attempts to
* allocate an inode from any allocation group other than the
* specified primary group.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - primary allocation group (to avoid).
* dir - 'true' if the new disk inode is for a directory.
* ip - pointer to a new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int
diAllocAny(struct inomap * imap, int agno, bool dir, struct inode *ip)
{
int ag, rc;
int maxag = JFS_SBI(imap->im_ipimap->i_sb)->bmap->db_maxag;
/* try to allocate from the ags following agno up to
* the maximum ag number.
*/
for (ag = agno + 1; ag <= maxag; ag++) {
AG_LOCK(imap, ag);
rc = diAllocAG(imap, ag, dir, ip);
AG_UNLOCK(imap, ag);
if (rc != -ENOSPC)
return (rc);
}
/* try to allocate from the ags in front of agno.
*/
for (ag = 0; ag < agno; ag++) {
AG_LOCK(imap, ag);
rc = diAllocAG(imap, ag, dir, ip);
AG_UNLOCK(imap, ag);
if (rc != -ENOSPC)
return (rc);
}
/* no free disk inodes.
*/
return -ENOSPC;
}
/*
* NAME: diAllocIno(imap,agno,ip)
*
* FUNCTION: allocate a disk inode from the allocation group's free
* inode list, returning an error if this free list is
* empty (i.e. no iags on the list).
*
* allocation occurs from the first iag on the list using
* the iag's free inode summary map to find the leftmost
* free inode in the iag.
*
* PRE CONDITION: Already have AG lock for this AG.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group.
* ip - pointer to new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
{
int iagno, ino, rc, rem, extno, sword;
struct metapage *mp;
struct iag *iagp;
/* check if there are iags on the ag's free inode list.
*/
if ((iagno = imap->im_agctl[agno].inofree) < 0)
return -ENOSPC;
/* obtain read lock on imap inode */
IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
/* read the iag at the head of the list.
*/
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(imap->im_ipimap);
return (rc);
}
iagp = (struct iag *) mp->data;
/* better be free inodes in this iag if it is on the
* list.
*/
if (!iagp->nfreeinos) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb, "nfreeinos = 0, but iag on freelist\n");
return -EIO;
}
/* scan the free inode summary map to find an extent
* with free inodes.
*/
for (sword = 0;; sword++) {
if (sword >= SMAPSZ) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb,
"free inode not found in summary map\n");
return -EIO;
}
if (~iagp->inosmap[sword])
break;
}
/* found an extent with free inodes. determine
* the extent number.
*/
rem = diFindFree(le32_to_cpu(iagp->inosmap[sword]), 0);
if (rem >= EXTSPERSUM) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb, "no free extent found\n");
return -EIO;
}
extno = (sword << L2EXTSPERSUM) + rem;
/* find the first free inode in the extent.
*/
rem = diFindFree(le32_to_cpu(iagp->wmap[extno]), 0);
if (rem >= INOSPEREXT) {
IREAD_UNLOCK(imap->im_ipimap);
release_metapage(mp);
jfs_error(ip->i_sb, "free inode not found\n");
return -EIO;
}
/* compute the inode number within the iag.
*/
ino = (extno << L2INOSPEREXT) + rem;
/* allocate the inode.
*/
rc = diAllocBit(imap, iagp, ino);
IREAD_UNLOCK(imap->im_ipimap);
if (rc) {
release_metapage(mp);
return (rc);
}
/* set the results of the allocation and write the iag.
*/
diInitInode(ip, iagno, ino, extno, iagp);
write_metapage(mp);
return (0);
}
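/* Rough worked example of the two-level lookup above, assuming the
 * usual JFS geometry of 32 extents per summary word and 32 inodes
 * per extent (EXTSPERSUM = INOSPEREXT = 32): if inosmap[0] is all
 * ones and inosmap[1] = 0xffff7fff, the scan stops at sword = 1 and
 * diFindFree() returns bit 16, so extno = (1 << 5) + 16 = 48; if
 * wmap[48] = 0xe0000000, the first free inode is bit 3, giving
 * ino = (48 << 5) + 3 = 1539 within this iag.
 */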
/*
* NAME: diAllocExt(imap,agno,ip)
*
* FUNCTION: add a new extent of free inodes to an iag, allocating
* an inode from this extent to satisfy the current allocation
* request.
*
* this routine first tries to find an existing iag with free
* extents through the ag free extent list. if list is not
* empty, the head of the list will be selected as the home
* of the new extent of free inodes. otherwise (the list is
* empty), a new iag will be allocated for the ag to contain
* the extent.
*
* once an iag has been selected, the free extent summary map
* is used to locate a free extent within the iag and diNewExt()
* is called to initialize the extent, with initialization
* including the allocation of the first inode of the extent
* for the purpose of satisfying this request.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* agno - allocation group number.
* ip - pointer to new inode to be filled in on successful return
* with the disk inode number allocated, its extent address
* and the start of the ag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
{
int rem, iagno, sword, extno, rc;
struct metapage *mp;
struct iag *iagp;
/* check if the ag has any iags with free extents. if not,
* allocate a new iag for the ag.
*/
if ((iagno = imap->im_agctl[agno].extfree) < 0) {
/* If successful, diNewIAG will obtain the read lock on the
* imap inode.
*/
if ((rc = diNewIAG(imap, &iagno, agno, &mp))) {
return (rc);
}
iagp = (struct iag *) mp->data;
/* set the ag number if this is a brand new iag
*/
iagp->agstart =
cpu_to_le64(AGTOBLK(agno, imap->im_ipimap));
} else {
/* read the iag.
*/
IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(imap->im_ipimap);
jfs_error(ip->i_sb, "error reading iag\n");
return rc;
}
iagp = (struct iag *) mp->data;
}
/* using the free extent summary map, find a free extent.
*/
for (sword = 0;; sword++) {
if (sword >= SMAPSZ) {
release_metapage(mp);
IREAD_UNLOCK(imap->im_ipimap);
jfs_error(ip->i_sb, "free ext summary map not found\n");
return -EIO;
}
if (~iagp->extsmap[sword])
break;
}
/* determine the extent number of the free extent.
*/
rem = diFindFree(le32_to_cpu(iagp->extsmap[sword]), 0);
if (rem >= EXTSPERSUM) {
release_metapage(mp);
IREAD_UNLOCK(imap->im_ipimap);
jfs_error(ip->i_sb, "free extent not found\n");
return -EIO;
}
extno = (sword << L2EXTSPERSUM) + rem;
/* initialize the new extent.
*/
rc = diNewExt(imap, iagp, extno);
IREAD_UNLOCK(imap->im_ipimap);
if (rc) {
/* something bad happened. if a new iag was allocated,
* place it back on the inode map's iag free list, and
* clear the ag number information.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
IAGFREE_LOCK(imap);
iagp->iagfree = cpu_to_le32(imap->im_freeiag);
imap->im_freeiag = iagno;
IAGFREE_UNLOCK(imap);
}
write_metapage(mp);
return (rc);
}
/* set the results of the allocation and write the iag.
*/
diInitInode(ip, iagno, extno << L2INOSPEREXT, extno, iagp);
write_metapage(mp);
return (0);
}
/*
* NAME: diAllocBit(imap,iagp,ino)
*
* FUNCTION: allocate a backed inode from an iag.
*
* this routine performs the mechanics of allocating a
* specified inode from a backed extent.
*
* if the inode to be allocated represents the last free
* inode within the iag, the iag will be removed from the
* ag free inode list.
*
* a careful update approach is used to provide consistency
* in the face of updates to multiple buffers. under this
* approach, all required buffers are obtained before making
 * any updates and are held until all updates are complete.
*
* PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on
* this AG. Must have read lock on imap inode.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* ino - inode number to be allocated within the iag.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
{
int extno, bitno, agno, sword, rc;
struct metapage *amp = NULL, *bmp = NULL;
struct iag *aiagp = NULL, *biagp = NULL;
u32 mask;
/* check if this is the last free inode within the iag.
* if so, it will have to be removed from the ag free
* inode list, so get the iags preceding and following
* it on the list.
*/
if (iagp->nfreeinos == cpu_to_le32(1)) {
if ((int) le32_to_cpu(iagp->inofreefwd) >= 0) {
if ((rc =
diIAGRead(imap, le32_to_cpu(iagp->inofreefwd),
&amp)))
return (rc);
aiagp = (struct iag *) amp->data;
}
if ((int) le32_to_cpu(iagp->inofreeback) >= 0) {
if ((rc =
diIAGRead(imap,
le32_to_cpu(iagp->inofreeback),
&bmp))) {
if (amp)
release_metapage(amp);
return (rc);
}
biagp = (struct iag *) bmp->data;
}
}
/* get the ag number, extent number, inode number within
* the extent.
*/
agno = BLKTOAG(le64_to_cpu(iagp->agstart), JFS_SBI(imap->im_ipimap->i_sb));
extno = ino >> L2INOSPEREXT;
bitno = ino & (INOSPEREXT - 1);
/* compute the mask for setting the map.
*/
mask = HIGHORDER >> bitno;
/* the inode should be free and backed.
*/
if (((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) ||
((le32_to_cpu(iagp->wmap[extno]) & mask) != 0) ||
(addressPXD(&iagp->inoext[extno]) == 0)) {
if (amp)
release_metapage(amp);
if (bmp)
release_metapage(bmp);
jfs_error(imap->im_ipimap->i_sb, "iag inconsistent\n");
return -EIO;
}
/* mark the inode as allocated in the working map.
*/
iagp->wmap[extno] |= cpu_to_le32(mask);
/* check if all inodes within the extent are now
* allocated. if so, update the free inode summary
* map to reflect this.
*/
if (iagp->wmap[extno] == cpu_to_le32(ONES)) {
sword = extno >> L2EXTSPERSUM;
bitno = extno & (EXTSPERSUM - 1);
iagp->inosmap[sword] |= cpu_to_le32(HIGHORDER >> bitno);
}
/* if this was the last free inode in the iag, remove the
* iag from the ag free inode list.
*/
if (iagp->nfreeinos == cpu_to_le32(1)) {
if (amp) {
aiagp->inofreeback = iagp->inofreeback;
write_metapage(amp);
}
if (bmp) {
biagp->inofreefwd = iagp->inofreefwd;
write_metapage(bmp);
} else {
imap->im_agctl[agno].inofree =
le32_to_cpu(iagp->inofreefwd);
}
iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
}
/* update the free inode count at the iag, ag, inode
* map levels.
*/
le32_add_cpu(&iagp->nfreeinos, -1);
imap->im_agctl[agno].numfree -= 1;
atomic_dec(&imap->im_numfree);
return (0);
}
/*
* NAME: diNewExt(imap,iagp,extno)
*
* FUNCTION: initialize a new extent of inodes for an iag, allocating
* the first inode of the extent for use for the current
* allocation request.
*
* disk resources are allocated for the new extent of inodes
* and the inodes themselves are initialized to reflect their
* existence within the extent (i.e. their inode numbers and
* inode extent addresses are set) and their initial state
* (mode and link count are set to zero).
*
* if the iag is new, it is not yet on an ag extent free list
* but will now be placed on this list.
*
* if the allocation of the new extent causes the iag to
* have no free extent, the iag will be removed from the
* ag extent free list.
*
* if the iag has no free backed inodes, it will be placed
* on the ag free inode list, since the addition of the new
* extent will now cause it to have free inodes.
*
* a careful update approach is used to provide consistency
* (i.e. list consistency) in the face of updates to multiple
* buffers. under this approach, all required buffers are
* obtained before making any updates and are held until all
* updates are complete.
*
* PRE CONDITION: Already have buffer lock on iagp. Already have AG lock on
* this AG. Must have read lock on imap inode.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagp - pointer to iag.
* extno - extent number.
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*/
static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
{
int agno, iagno, fwd, back, freei = 0, sword, rc;
struct iag *aiagp = NULL, *biagp = NULL, *ciagp = NULL;
struct metapage *amp, *bmp, *cmp, *dmp;
struct inode *ipimap;
s64 blkno, hint;
int i, j;
u32 mask;
ino_t ino;
struct dinode *dp;
struct jfs_sb_info *sbi;
/* better have free extents.
*/
if (!iagp->nfreeexts) {
jfs_error(imap->im_ipimap->i_sb, "no free extents\n");
return -EIO;
}
/* get the inode map inode.
*/
ipimap = imap->im_ipimap;
sbi = JFS_SBI(ipimap->i_sb);
amp = bmp = cmp = NULL;
/* get the ag and iag numbers for this iag.
*/
agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
iagno = le32_to_cpu(iagp->iagnum);
/* check if this is the last free extent within the
* iag. if so, the iag must be removed from the ag
* free extent list, so get the iags preceding and
* following the iag on this list.
*/
if (iagp->nfreeexts == cpu_to_le32(1)) {
if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
return (rc);
aiagp = (struct iag *) amp->data;
}
if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
if ((rc = diIAGRead(imap, back, &bmp)))
goto error_out;
biagp = (struct iag *) bmp->data;
}
} else {
/* the iag has free extents. if all extents are free
* (as is the case for a newly allocated iag), the iag
* must be added to the ag free extent list, so get
* the iag at the head of the list in preparation for
* adding this iag to this list.
*/
fwd = back = -1;
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (struct iag *) amp->data;
}
}
}
/* check if the iag has no free inodes. if so, the iag
* will have to be added to the ag free inode list, so get
* the iag at the head of the list in preparation for
* adding this iag to this list. in doing this, we must
* check if we already have the iag at the head of
* the list in hand.
*/
if (iagp->nfreeinos == 0) {
freei = imap->im_agctl[agno].inofree;
if (freei >= 0) {
if (freei == fwd) {
ciagp = aiagp;
} else if (freei == back) {
ciagp = biagp;
} else {
if ((rc = diIAGRead(imap, freei, &cmp)))
goto error_out;
ciagp = (struct iag *) cmp->data;
}
if (ciagp == NULL) {
jfs_error(imap->im_ipimap->i_sb,
"ciagp == NULL\n");
rc = -EIO;
goto error_out;
}
}
}
/* allocate disk space for the inode extent.
*/
if ((extno == 0) || (addressPXD(&iagp->inoext[extno - 1]) == 0))
hint = ((s64) agno << sbi->bmap->db_agl2size) - 1;
else
hint = addressPXD(&iagp->inoext[extno - 1]) +
lengthPXD(&iagp->inoext[extno - 1]) - 1;
if ((rc = dbAlloc(ipimap, hint, (s64) imap->im_nbperiext, &blkno)))
goto error_out;
/* compute the inode number of the first inode within the
* extent.
*/
ino = (iagno << L2INOSPERIAG) + (extno << L2INOSPEREXT);
/* initialize the inodes within the newly allocated extent a
* page at a time.
*/
for (i = 0; i < imap->im_nbperiext; i += sbi->nbperpage) {
/* get a buffer for this page of disk inodes.
*/
dmp = get_metapage(ipimap, blkno + i, PSIZE, 1);
if (dmp == NULL) {
rc = -EIO;
goto error_out;
}
dp = (struct dinode *) dmp->data;
/* initialize the inode number, mode, link count and
* inode extent address.
*/
for (j = 0; j < INOSPERPAGE; j++, dp++, ino++) {
dp->di_inostamp = cpu_to_le32(sbi->inostamp);
dp->di_number = cpu_to_le32(ino);
dp->di_fileset = cpu_to_le32(FILESYSTEM_I);
dp->di_mode = 0;
dp->di_nlink = 0;
PXDaddress(&(dp->di_ixpxd), blkno);
PXDlength(&(dp->di_ixpxd), imap->im_nbperiext);
}
write_metapage(dmp);
}
/* if this is the last free extent within the iag, remove the
* iag from the ag free extent list.
*/
if (iagp->nfreeexts == cpu_to_le32(1)) {
if (fwd >= 0)
aiagp->extfreeback = iagp->extfreeback;
if (back >= 0)
biagp->extfreefwd = iagp->extfreefwd;
else
imap->im_agctl[agno].extfree =
le32_to_cpu(iagp->extfreefwd);
iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
} else {
/* if the iag has all free extents (newly allocated iag),
* add the iag to the ag free extent list.
*/
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
if (fwd >= 0)
aiagp->extfreeback = cpu_to_le32(iagno);
iagp->extfreefwd = cpu_to_le32(fwd);
iagp->extfreeback = cpu_to_le32(-1);
imap->im_agctl[agno].extfree = iagno;
}
}
/* if the iag has no free inodes, add the iag to the
* ag free inode list.
*/
if (iagp->nfreeinos == 0) {
if (freei >= 0)
ciagp->inofreeback = cpu_to_le32(iagno);
iagp->inofreefwd =
cpu_to_le32(imap->im_agctl[agno].inofree);
iagp->inofreeback = cpu_to_le32(-1);
imap->im_agctl[agno].inofree = iagno;
}
/* initialize the extent descriptor of the extent. */
PXDlength(&iagp->inoext[extno], imap->im_nbperiext);
PXDaddress(&iagp->inoext[extno], blkno);
/* initialize the working and persistent map of the extent.
* the working map will be initialized such that
* it indicates the first inode of the extent is allocated.
*/
iagp->wmap[extno] = cpu_to_le32(HIGHORDER);
iagp->pmap[extno] = 0;
/* update the free inode and free extent summary maps
* for the extent to indicate the extent has free inodes
* and no longer represents a free extent.
*/
sword = extno >> L2EXTSPERSUM;
mask = HIGHORDER >> (extno & (EXTSPERSUM - 1));
iagp->extsmap[sword] |= cpu_to_le32(mask);
iagp->inosmap[sword] &= cpu_to_le32(~mask);
/* update the free inode and free extent counts for the
* iag.
*/
le32_add_cpu(&iagp->nfreeinos, (INOSPEREXT - 1));
le32_add_cpu(&iagp->nfreeexts, -1);
/* update the free and backed inode counts for the ag.
*/
imap->im_agctl[agno].numfree += (INOSPEREXT - 1);
imap->im_agctl[agno].numinos += INOSPEREXT;
/* update the free and backed inode counts for the inode map.
*/
atomic_add(INOSPEREXT - 1, &imap->im_numfree);
atomic_add(INOSPEREXT, &imap->im_numinos);
/* write the iags.
*/
if (amp)
write_metapage(amp);
if (bmp)
write_metapage(bmp);
if (cmp)
write_metapage(cmp);
return (0);
error_out:
/* release the iags.
*/
if (amp)
release_metapage(amp);
if (bmp)
release_metapage(bmp);
if (cmp)
release_metapage(cmp);
return (rc);
}
/*
* NAME: diNewIAG(imap,iagnop,agno)
*
* FUNCTION: allocate a new iag for an allocation group.
*
* first tries to allocate the iag from the inode map
* iagfree list:
* if the list has free iags, the head of the list is removed
* and returned to satisfy the request.
* if the inode map's iag free list is empty, the inode map
* is extended to hold a new iag. this new iag is initialized
* and returned to satisfy the request.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagnop - pointer to an iag number set with the number of the
* newly allocated iag upon successful return.
* agno - allocation group number.
* bpp - Buffer pointer to be filled in with new IAG's buffer
*
* RETURN VALUES:
* 0 - success.
* -ENOSPC - insufficient disk resources.
* -EIO - i/o error.
*
* serialization:
* AG lock held on entry/exit;
* write lock on the map is held inside;
* read lock on the map is held on successful completion;
*
* note: new iag transaction:
* . synchronously write iag;
* . write log of xtree and inode of imap;
* . commit;
* . synchronous write of xtree (right to left, bottom to top);
* . at start of logredo(): init in-memory imap with one additional iag page;
* . at end of logredo(): re-read imap inode to determine
* new imap size;
*/
static int
diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
{
int rc;
int iagno, i, xlen;
struct inode *ipimap;
struct super_block *sb;
struct jfs_sb_info *sbi;
struct metapage *mp;
struct iag *iagp;
s64 xaddr = 0;
s64 blkno;
tid_t tid;
struct inode *iplist[1];
/* pick up pointers to the inode map and mount inodes */
ipimap = imap->im_ipimap;
sb = ipimap->i_sb;
sbi = JFS_SBI(sb);
/* acquire the free iag lock */
IAGFREE_LOCK(imap);
/* if there are any iags on the inode map free iag list,
* allocate the iag from the head of the list.
*/
if (imap->im_freeiag >= 0) {
/* pick up the iag number at the head of the list */
iagno = imap->im_freeiag;
/* determine the logical block number of the iag */
blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
} else {
/* no free iags. the inode map will have to be extended
* to include a new iag.
*/
/* acquire inode map lock */
IWRITE_LOCK(ipimap, RDWRLOCK_IMAP);
if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) {
IWRITE_UNLOCK(ipimap);
IAGFREE_UNLOCK(imap);
jfs_error(imap->im_ipimap->i_sb,
"ipimap->i_size is wrong\n");
return -EIO;
}
/* get the next available iag number */
iagno = imap->im_nextiag;
/* make sure that we have not exceeded the maximum inode
* number limit.
*/
if (iagno > (MAXIAGS - 1)) {
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
rc = -ENOSPC;
goto out;
}
/*
* synchronously append new iag page.
*/
/* determine the logical address of iag page to append */
blkno = IAGTOLBLK(iagno, sbi->l2nbperpage);
/* Allocate extent for new iag page */
xlen = sbi->nbperpage;
if ((rc = dbAlloc(ipimap, 0, (s64) xlen, &xaddr))) {
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
goto out;
}
/*
* start transaction of update of the inode map
* addressing structure pointing to the new iag page;
*/
tid = txBegin(sb, COMMIT_FORCE);
mutex_lock(&JFS_IP(ipimap)->commit_mutex);
/* update the inode map addressing structure to point to it */
if ((rc =
xtInsert(tid, ipimap, 0, blkno, xlen, &xaddr, 0))) {
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
/* Free the blocks allocated for the iag since it was
* not successfully added to the inode map
*/
dbFree(ipimap, xaddr, (s64) xlen);
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
goto out;
}
/* update the inode map's inode to reflect the extension */
ipimap->i_size += PSIZE;
inode_add_bytes(ipimap, PSIZE);
/* assign a buffer for the page */
mp = get_metapage(ipimap, blkno, PSIZE, 0);
if (!mp) {
/*
* This is very unlikely since we just created the
* extent, but let's try to handle it correctly
*/
xtTruncate(tid, ipimap, ipimap->i_size - PSIZE,
COMMIT_PWMAP);
txAbort(tid, 0);
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
rc = -EIO;
goto out;
}
iagp = (struct iag *) mp->data;
/* init the iag */
memset(iagp, 0, sizeof(struct iag));
iagp->iagnum = cpu_to_le32(iagno);
iagp->inofreefwd = iagp->inofreeback = cpu_to_le32(-1);
iagp->extfreefwd = iagp->extfreeback = cpu_to_le32(-1);
iagp->iagfree = cpu_to_le32(-1);
iagp->nfreeinos = 0;
iagp->nfreeexts = cpu_to_le32(EXTSPERIAG);
/* initialize the free inode summary map (free extent
* summary map initialization handled by bzero).
*/
for (i = 0; i < SMAPSZ; i++)
iagp->inosmap[i] = cpu_to_le32(ONES);
/*
* Write and sync the metapage
*/
flush_metapage(mp);
/*
* txCommit(COMMIT_FORCE) will synchronously write address
* index pages and inode after commit in careful update order
* of address index pages (right to left, bottom up);
*/
iplist[0] = ipimap;
rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);
txEnd(tid);
mutex_unlock(&JFS_IP(ipimap)->commit_mutex);
duplicateIXtree(sb, blkno, xlen, &xaddr);
/* update the next available iag number */
imap->im_nextiag += 1;
/* Add the iag to the iag free list so we don't lose the iag
* if a failure happens now.
*/
imap->im_freeiag = iagno;
/* Until we have logredo working, we want the imap inode &
* control page to be up to date.
*/
diSync(ipimap);
/* release the inode map lock */
IWRITE_UNLOCK(ipimap);
}
/* obtain read lock on map */
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
/* read the iag */
if ((rc = diIAGRead(imap, iagno, &mp))) {
IREAD_UNLOCK(ipimap);
rc = -EIO;
goto out;
}
iagp = (struct iag *) mp->data;
/* remove the iag from the iag free list */
imap->im_freeiag = le32_to_cpu(iagp->iagfree);
iagp->iagfree = cpu_to_le32(-1);
/* set the return iag number and buffer pointer */
*iagnop = iagno;
*mpp = mp;
out:
/* release the iag free lock */
IAGFREE_UNLOCK(imap);
return (rc);
}
/*
* NAME: diIAGRead()
*
* FUNCTION: get the buffer for the specified iag within a fileset
* or aggregate inode map.
*
* PARAMETERS:
* imap - pointer to inode map control structure.
* iagno - iag number.
* bpp - point to buffer pointer to be filled in on successful
* exit.
*
* SERIALIZATION:
* must have read lock on imap inode
* (When called by diExtendFS, the filesystem is quiesced, therefore
* the read lock is unnecessary.)
*
* RETURN VALUES:
* 0 - success.
* -EIO - i/o error.
*/
static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
{
struct inode *ipimap = imap->im_ipimap;
s64 blkno;
/* compute the logical block number of the iag. */
blkno = IAGTOLBLK(iagno, JFS_SBI(ipimap->i_sb)->l2nbperpage);
/* read the iag. */
*mpp = read_metapage(ipimap, blkno, PSIZE, 0);
if (*mpp == NULL) {
return -EIO;
}
return (0);
}
/*
* NAME: diFindFree()
*
* FUNCTION: find the first free bit in a word starting at
* the specified bit position.
*
* PARAMETERS:
* word - word to be examined.
* start - starting bit position.
*
* RETURN VALUES:
* bit position of first free bit in the word or 32 if
* no free bits were found.
*/
static int diFindFree(u32 word, int start)
{
int bitno;
assert(start < 32);
/* scan the word for the first free bit. */
for (word <<= start, bitno = start; bitno < 32;
bitno++, word <<= 1) {
if ((word & HIGHORDER) == 0)
break;
}
return (bitno);
}
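/* Illustrative calls, assuming HIGHORDER is the 0x80000000 bit:
 * diFindFree(0xffffffff, 0) scans every bit and returns 32 (no free
 * bit found), diFindFree(0xc0000000, 0) returns 2 (the first clear
 * bit after the two set high-order bits), and
 * diFindFree(0xc0000000, 5) returns 5 since the scan starts at an
 * already-clear bit position.
 */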
/*
* NAME: diUpdatePMap()
*
* FUNCTION: Update the persistent map in an IAG for the allocation or
* freeing of the specified inode.
*
* PRE CONDITIONS: Working map has already been updated for allocate.
*
* PARAMETERS:
* ipimap - Incore inode map inode
* inum - Number of inode to mark in permanent map
* is_free - If 'true' indicates inode should be marked freed, otherwise
* indicates inode should be marked allocated.
*
* RETURN VALUES:
* 0 for success
*/
int
diUpdatePMap(struct inode *ipimap,
unsigned long inum, bool is_free, struct tblock * tblk)
{
int rc;
struct iag *iagp;
struct metapage *mp;
int iagno, ino, extno, bitno;
struct inomap *imap;
u32 mask;
struct jfs_log *log;
int lsn, difft, diffp;
unsigned long flags;
imap = JFS_IP(ipimap)->i_imap;
/* get the iag number containing the inode */
iagno = INOTOIAG(inum);
/* make sure that the iag is contained within the map */
if (iagno >= imap->im_nextiag) {
jfs_error(ipimap->i_sb, "the iag is outside the map\n");
return -EIO;
}
/* read the iag */
IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
rc = diIAGRead(imap, iagno, &mp);
IREAD_UNLOCK(ipimap);
if (rc)
return (rc);
metapage_wait_for_io(mp);
iagp = (struct iag *) mp->data;
/* get the inode number and extent number of the inode within
* the iag and the inode number within the extent.
*/
ino = inum & (INOSPERIAG - 1);
extno = ino >> L2INOSPEREXT;
bitno = ino & (INOSPEREXT - 1);
mask = HIGHORDER >> bitno;
/*
* mark the inode free in persistent map:
*/
if (is_free) {
/* The inode should have been allocated both in working
* map and in persistent map;
 * the inode will be freed from the working map at the release
 * of the last reference;
*/
if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
jfs_error(ipimap->i_sb,
"inode %ld not marked as allocated in wmap!\n",
inum);
}
if (!(le32_to_cpu(iagp->pmap[extno]) & mask)) {
jfs_error(ipimap->i_sb,
"inode %ld not marked as allocated in pmap!\n",
inum);
}
/* update the bitmap for the extent of the freed inode */
iagp->pmap[extno] &= cpu_to_le32(~mask);
}
/*
* mark the inode allocated in persistent map:
*/
else {
/* The inode should be already allocated in the working map
* and should be free in persistent map;
*/
if (!(le32_to_cpu(iagp->wmap[extno]) & mask)) {
release_metapage(mp);
jfs_error(ipimap->i_sb,
"the inode is not allocated in the working map\n");
return -EIO;
}
if ((le32_to_cpu(iagp->pmap[extno]) & mask) != 0) {
release_metapage(mp);
jfs_error(ipimap->i_sb,
"the inode is not free in the persistent map\n");
return -EIO;
}
/* update the bitmap for the extent of the allocated inode */
iagp->pmap[extno] |= cpu_to_le32(mask);
}
/*
* update iag lsn
*/
lsn = tblk->lsn;
log = JFS_SBI(tblk->sb)->log;
LOGSYNC_LOCK(log, flags);
if (mp->lsn != 0) {
/* inherit older/smaller lsn */
logdiff(difft, lsn, log);
logdiff(diffp, mp->lsn, log);
if (difft < diffp) {
mp->lsn = lsn;
/* move mp after tblock in logsync list */
list_move(&mp->synclist, &tblk->synclist);
}
/* inherit younger/larger clsn */
assert(mp->clsn);
logdiff(difft, tblk->clsn, log);
logdiff(diffp, mp->clsn, log);
if (difft > diffp)
mp->clsn = tblk->clsn;
} else {
mp->log = log;
mp->lsn = lsn;
/* insert mp after tblock in logsync list */
log->count++;
list_add(&mp->synclist, &tblk->synclist);
mp->clsn = tblk->clsn;
}
LOGSYNC_UNLOCK(log, flags);
write_metapage(mp);
return (0);
}
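/* Rough worked example of the inum decomposition above, assuming the
 * usual JFS geometry of 4096 inodes per iag and 32 inodes per
 * extent: inum = 5000 lives in iagno = 1 (5000 / 4096), so
 * ino = 5000 & 4095 = 904, extno = 904 >> 5 = 28,
 * bitno = 904 & 31 = 8 and mask = 0x80000000 >> 8 = 0x00800000 is
 * the bit cleared from (or set in) pmap[28].
 */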
/*
* diExtendFS()
*
* function: update imap for extendfs();
*
* note: AG size has been increased s.t. each k old contiguous AGs are
* coalesced into a new AG;
*/
int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
{
int rc, rcx = 0;
struct inomap *imap = JFS_IP(ipimap)->i_imap;
struct iag *iagp = NULL, *hiagp = NULL;
struct bmap *mp = JFS_SBI(ipbmap->i_sb)->bmap;
struct metapage *bp, *hbp;
int i, n, head;
int numinos, xnuminos = 0, xnumfree = 0;
s64 agstart;
jfs_info("diExtendFS: nextiag:%d numinos:%d numfree:%d",
imap->im_nextiag, atomic_read(&imap->im_numinos),
atomic_read(&imap->im_numfree));
/*
* reconstruct imap
*
* coalesce contiguous k (newAGSize/oldAGSize) AGs;
* i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
* note: new AG size = old AG size * (2**x).
*/
/* init per AG control information im_agctl[] */
for (i = 0; i < MAXAG; i++) {
imap->im_agctl[i].inofree = -1;
imap->im_agctl[i].extfree = -1;
imap->im_agctl[i].numinos = 0; /* number of backed inodes */
imap->im_agctl[i].numfree = 0; /* number of free backed inodes */
}
/*
* process each iag page of the map.
*
* rebuild AG Free Inode List, AG Free Inode Extent List;
*/
for (i = 0; i < imap->im_nextiag; i++) {
if ((rc = diIAGRead(imap, i, &bp))) {
rcx = rc;
continue;
}
iagp = (struct iag *) bp->data;
if (le32_to_cpu(iagp->iagnum) != i) {
release_metapage(bp);
jfs_error(ipimap->i_sb, "unexpected value of iagnum\n");
return -EIO;
}
/* leave free iag in the free iag list */
if (iagp->nfreeexts == cpu_to_le32(EXTSPERIAG)) {
release_metapage(bp);
continue;
}
agstart = le64_to_cpu(iagp->agstart);
n = agstart >> mp->db_agl2size;
iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
/* compute backed inodes */
numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
<< L2INOSPEREXT;
if (numinos > 0) {
/* merge AG backed inodes */
imap->im_agctl[n].numinos += numinos;
xnuminos += numinos;
}
/* if any backed free inodes, insert at AG free inode list */
if ((int) le32_to_cpu(iagp->nfreeinos) > 0) {
if ((head = imap->im_agctl[n].inofree) == -1) {
iagp->inofreefwd = cpu_to_le32(-1);
iagp->inofreeback = cpu_to_le32(-1);
} else {
if ((rc = diIAGRead(imap, head, &hbp))) {
rcx = rc;
goto nextiag;
}
hiagp = (struct iag *) hbp->data;
hiagp->inofreeback = iagp->iagnum;
iagp->inofreefwd = cpu_to_le32(head);
iagp->inofreeback = cpu_to_le32(-1);
write_metapage(hbp);
}
imap->im_agctl[n].inofree =
le32_to_cpu(iagp->iagnum);
/* merge AG backed free inodes */
imap->im_agctl[n].numfree +=
le32_to_cpu(iagp->nfreeinos);
xnumfree += le32_to_cpu(iagp->nfreeinos);
}
/* if any free extents, insert at AG free extent list */
if (le32_to_cpu(iagp->nfreeexts) > 0) {
if ((head = imap->im_agctl[n].extfree) == -1) {
iagp->extfreefwd = cpu_to_le32(-1);
iagp->extfreeback = cpu_to_le32(-1);
} else {
if ((rc = diIAGRead(imap, head, &hbp))) {
rcx = rc;
goto nextiag;
}
hiagp = (struct iag *) hbp->data;
hiagp->extfreeback = iagp->iagnum;
iagp->extfreefwd = cpu_to_le32(head);
iagp->extfreeback = cpu_to_le32(-1);
write_metapage(hbp);
}
imap->im_agctl[n].extfree =
le32_to_cpu(iagp->iagnum);
}
nextiag:
write_metapage(bp);
}
if (xnuminos != atomic_read(&imap->im_numinos) ||
xnumfree != atomic_read(&imap->im_numfree)) {
jfs_error(ipimap->i_sb, "numinos or numfree incorrect\n");
return -EIO;
}
return rcx;
}
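/* Illustrative example of the coalescing above (the sizes are made
 * up): if extendfs grew the AG size by a factor of k = 4 (x = 2),
 * old AGs 0-3 collapse into new AG 0 and old AGs 4-7 into new AG 1;
 * an iag whose agstart fell inside old AG 5 now yields
 * n = agstart >> db_agl2size = 1 and its agstart is rewritten to the
 * start of new AG 1, i.e. (s64)1 << db_agl2size.
 */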
/*
* duplicateIXtree()
*
* serialization: IWRITE_LOCK held on entry/exit
*
* note: shadow page with regular inode (rel.2);
*/
static void duplicateIXtree(struct super_block *sb, s64 blkno,
int xlen, s64 *xaddr)
{
struct jfs_superblock *j_sb;
struct buffer_head *bh;
struct inode *ip;
tid_t tid;
/* if AIT2 ipmap2 is bad, do not try to update it */
if (JFS_SBI(sb)->mntflag & JFS_BAD_SAIT) /* s_flag */
return;
ip = diReadSpecial(sb, FILESYSTEM_I, 1);
if (ip == NULL) {
JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
if (readSuper(sb, &bh))
return;
j_sb = (struct jfs_superblock *)bh->b_data;
j_sb->s_flag |= cpu_to_le32(JFS_BAD_SAIT);
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
return;
}
/* start transaction */
tid = txBegin(sb, COMMIT_FORCE);
/* update the inode map addressing structure to point to it */
if (xtInsert(tid, ip, 0, blkno, xlen, xaddr, 0)) {
JFS_SBI(sb)->mntflag |= JFS_BAD_SAIT;
txAbort(tid, 1);
goto cleanup;
}
/* update the inode map's inode to reflect the extension */
ip->i_size += PSIZE;
inode_add_bytes(ip, PSIZE);
txCommit(tid, 1, &ip, COMMIT_FORCE);
cleanup:
txEnd(tid);
diFreeSpecial(ip);
}
/*
* NAME: copy_from_dinode()
*
* FUNCTION: Copies inode info from disk inode to in-memory inode
*
* RETURN VALUES:
* 0 - success
* -ENOMEM - insufficient memory
*/
static int copy_from_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
jfs_set_inode_flags(ip);
ip->i_mode = le32_to_cpu(dip->di_mode) & 0xffff;
if (sbi->umask != -1) {
ip->i_mode = (ip->i_mode & ~0777) | (0777 & ~sbi->umask);
/* For directories, add x permission if r is allowed by umask */
if (S_ISDIR(ip->i_mode)) {
if (ip->i_mode & 0400)
ip->i_mode |= 0100;
if (ip->i_mode & 0040)
ip->i_mode |= 0010;
if (ip->i_mode & 0004)
ip->i_mode |= 0001;
}
}
set_nlink(ip, le32_to_cpu(dip->di_nlink));
jfs_ip->saved_uid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid));
if (!uid_valid(sbi->uid))
ip->i_uid = jfs_ip->saved_uid;
else {
ip->i_uid = sbi->uid;
}
jfs_ip->saved_gid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid));
if (!gid_valid(sbi->gid))
ip->i_gid = jfs_ip->saved_gid;
else {
ip->i_gid = sbi->gid;
}
ip->i_size = le64_to_cpu(dip->di_size);
ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
ip->i_generation = le32_to_cpu(dip->di_gen);
jfs_ip->ixpxd = dip->di_ixpxd; /* in-memory pxd's are little-endian */
jfs_ip->acl = dip->di_acl; /* as are dxd's */
jfs_ip->ea = dip->di_ea;
jfs_ip->next_index = le32_to_cpu(dip->di_next_index);
jfs_ip->otime = le32_to_cpu(dip->di_otime.tv_sec);
jfs_ip->acltype = le32_to_cpu(dip->di_acltype);
if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode)) {
jfs_ip->dev = le32_to_cpu(dip->di_rdev);
ip->i_rdev = new_decode_dev(jfs_ip->dev);
}
if (S_ISDIR(ip->i_mode)) {
memcpy(&jfs_ip->i_dirtable, &dip->di_dirtable, 384);
} else if (S_ISREG(ip->i_mode) || S_ISLNK(ip->i_mode)) {
memcpy(&jfs_ip->i_xtroot, &dip->di_xtroot, 288);
} else
memcpy(&jfs_ip->i_inline_ea, &dip->di_inlineea, 128);
/* Zero the in-memory-only stuff */
jfs_ip->cflag = 0;
jfs_ip->btindex = 0;
jfs_ip->btorder = 0;
jfs_ip->bxflag = 0;
jfs_ip->blid = 0;
jfs_ip->atlhead = 0;
jfs_ip->atltail = 0;
jfs_ip->xtlid = 0;
return (0);
}
/*
* NAME: copy_to_dinode()
*
* FUNCTION: Copies inode info from in-memory inode to disk inode
*/
static void copy_to_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
dip->di_fileset = cpu_to_le32(jfs_ip->fileset);
dip->di_inostamp = cpu_to_le32(sbi->inostamp);
dip->di_number = cpu_to_le32(ip->i_ino);
dip->di_gen = cpu_to_le32(ip->i_generation);
dip->di_size = cpu_to_le64(ip->i_size);
dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
dip->di_nlink = cpu_to_le32(ip->i_nlink);
if (!uid_valid(sbi->uid))
dip->di_uid = cpu_to_le32(i_uid_read(ip));
else
dip->di_uid = cpu_to_le32(from_kuid(&init_user_ns,
jfs_ip->saved_uid));
if (!gid_valid(sbi->gid))
dip->di_gid = cpu_to_le32(i_gid_read(ip));
else
dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns,
jfs_ip->saved_gid));
jfs_get_inode_flags(jfs_ip);
/*
* mode2 is only needed for storing the higher order bits.
* Trust i_mode for the lower order ones
*/
if (sbi->umask == -1)
dip->di_mode = cpu_to_le32((jfs_ip->mode2 & 0xffff0000) |
ip->i_mode);
else /* Leave the original permissions alone */
dip->di_mode = cpu_to_le32(jfs_ip->mode2);
dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec);
dip->di_ctime.tv_nsec = cpu_to_le32(ip->i_ctime.tv_nsec);
dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */
dip->di_acl = jfs_ip->acl; /* as are dxd's */
dip->di_ea = jfs_ip->ea;
dip->di_next_index = cpu_to_le32(jfs_ip->next_index);
dip->di_otime.tv_sec = cpu_to_le32(jfs_ip->otime);
dip->di_otime.tv_nsec = 0;
dip->di_acltype = cpu_to_le32(jfs_ip->acltype);
if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
dip->di_rdev = cpu_to_le32(jfs_ip->dev);
}
| gpl-2.0 |
Radium-Devices/Radium_sprout | drivers/char/hw_random/picoxcell-rng.c | 2316 | 4778 | /*
* Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* All enquiries to support@picochip.com
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#define DATA_REG_OFFSET 0x0200
#define CSR_REG_OFFSET 0x0278
#define CSR_OUT_EMPTY_MASK (1 << 24)
#define CSR_FAULT_MASK (1 << 1)
#define TRNG_BLOCK_RESET_MASK (1 << 0)
#define TAI_REG_OFFSET 0x0380
/*
* The maximum amount of time in microseconds to spend waiting for data if the
* core wants us to wait. The TRNG should generate 32 bits every 320ns so a
* timeout of 20us seems reasonable. The TRNG does builtin tests of the data
* for randomness so we can't always assume there is data present.
*/
#define PICO_TRNG_TIMEOUT 20
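/*
 * Back-of-the-envelope check of the figure above (rates taken from
 * the comment, not measured): at one 32-bit word every 320ns the
 * core could produce roughly 62 words within the 20us window
 * (20000ns / 320ns), so a FIFO that stays empty for the whole
 * timeout almost certainly means the built-in randomness tests are
 * rejecting data rather than the core merely being slow.
 */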
static void __iomem *rng_base;
static struct clk *rng_clk;
struct device *rng_dev;
static inline u32 picoxcell_trng_read_csr(void)
{
return __raw_readl(rng_base + CSR_REG_OFFSET);
}
static inline bool picoxcell_trng_is_empty(void)
{
return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK;
}
/*
* Take the random number generator out of reset and make sure the interrupts
* are masked. We shouldn't need to get large amounts of random bytes so just
* poll the status register. The hardware generates 32 bits every 320ns so we
* shouldn't have to wait long enough to warrant waiting for an IRQ.
*/
static void picoxcell_trng_start(void)
{
__raw_writel(0, rng_base + TAI_REG_OFFSET);
__raw_writel(0, rng_base + CSR_REG_OFFSET);
}
static void picoxcell_trng_reset(void)
{
__raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET);
__raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET);
picoxcell_trng_start();
}
/*
* Get some random data from the random number generator. The hw_random core
* layer provides us with locking.
*/
static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
int i;
/* Wait for some data to become available. */
for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) {
if (!wait)
return 0;
udelay(1);
}
if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) {
dev_err(rng_dev, "fault detected, resetting TRNG\n");
picoxcell_trng_reset();
return -EIO;
}
if (i == PICO_TRNG_TIMEOUT)
return 0;
*(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET);
return sizeof(u32);
}
static struct hwrng picoxcell_trng = {
.name = "picoxcell",
.read = picoxcell_trng_read,
};
static int picoxcell_trng_probe(struct platform_device *pdev)
{
int ret;
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_warn(&pdev->dev, "no memory resource\n");
return -ENOMEM;
}
if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
"picoxcell_trng")) {
dev_warn(&pdev->dev, "unable to request io mem\n");
return -EBUSY;
}
rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
if (!rng_base) {
dev_warn(&pdev->dev, "unable to remap io mem\n");
return -ENOMEM;
}
rng_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(rng_clk)) {
dev_warn(&pdev->dev, "no clk\n");
return PTR_ERR(rng_clk);
}
ret = clk_enable(rng_clk);
if (ret) {
dev_warn(&pdev->dev, "unable to enable clk\n");
goto err_enable;
}
picoxcell_trng_start();
ret = hwrng_register(&picoxcell_trng);
if (ret)
goto err_register;
rng_dev = &pdev->dev;
dev_info(&pdev->dev, "pixoxcell random number generator active\n");
return 0;
err_register:
clk_disable(rng_clk);
err_enable:
clk_put(rng_clk);
return ret;
}
static int picoxcell_trng_remove(struct platform_device *pdev)
{
hwrng_unregister(&picoxcell_trng);
clk_disable(rng_clk);
clk_put(rng_clk);
return 0;
}
#ifdef CONFIG_PM
static int picoxcell_trng_suspend(struct device *dev)
{
clk_disable(rng_clk);
return 0;
}
static int picoxcell_trng_resume(struct device *dev)
{
return clk_enable(rng_clk);
}
static const struct dev_pm_ops picoxcell_trng_pm_ops = {
.suspend = picoxcell_trng_suspend,
.resume = picoxcell_trng_resume,
};
#endif /* CONFIG_PM */
static struct platform_driver picoxcell_trng_driver = {
.probe = picoxcell_trng_probe,
.remove = picoxcell_trng_remove,
.driver = {
.name = "picoxcell-trng",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &picoxcell_trng_pm_ops,
#endif /* CONFIG_PM */
},
};
module_platform_driver(picoxcell_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
| gpl-2.0 |
intervigilium/android_kernel_htc_msm8660 | drivers/media/rc/ir-jvc-decoder.c | 3084 | 4765 | /* ir-jvc-decoder.c - handle JVC IR Pulse/Space protocol
*
* Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/bitrev.h>
#include "rc-core-priv.h"
#define JVC_NBITS 16 /* dev(8) + func(8) */
#define JVC_UNIT 525000 /* ns */
#define JVC_HEADER_PULSE (16 * JVC_UNIT) /* lack of header -> repeat */
#define JVC_HEADER_SPACE (8 * JVC_UNIT)
#define JVC_BIT_PULSE (1 * JVC_UNIT)
#define JVC_BIT_0_SPACE (1 * JVC_UNIT)
#define JVC_BIT_1_SPACE (3 * JVC_UNIT)
#define JVC_TRAILER_PULSE (1 * JVC_UNIT)
#define JVC_TRAILER_SPACE (35 * JVC_UNIT)
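/*
 * Rough sketch of the pulse-distance coding implied by the timings
 * above (the values simply restate the defines, they are not new
 * protocol facts): every bit starts with a 1-unit (525us) pulse and
 * is distinguished by the following space, 1 unit for a logical 0
 * and 3 units for a logical 1, so a full 16-bit frame is
 * header (16 + 8 units) + 16 * (1 + 1..3 units) + trailer.
 */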
enum jvc_state {
STATE_INACTIVE,
STATE_HEADER_SPACE,
STATE_BIT_PULSE,
STATE_BIT_SPACE,
STATE_TRAILER_PULSE,
STATE_TRAILER_SPACE,
STATE_CHECK_REPEAT,
};
/**
* ir_jvc_decode() - Decode one JVC pulse or space
* @dev: the struct rc_dev descriptor of the device
* @duration: the struct ir_raw_event descriptor of the pulse/space
*
* This function returns -EINVAL if the pulse violates the state machine
*/
static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct jvc_dec *data = &dev->raw->jvc;
if (!(dev->raw->enabled_protocols & RC_TYPE_JVC))
return 0;
if (!is_timing_event(ev)) {
if (ev.reset)
data->state = STATE_INACTIVE;
return 0;
}
if (!geq_margin(ev.duration, JVC_UNIT, JVC_UNIT / 2))
goto out;
IR_dprintk(2, "JVC decode started at state %d (%uus %s)\n",
data->state, TO_US(ev.duration), TO_STR(ev.pulse));
again:
switch (data->state) {
case STATE_INACTIVE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
break;
data->count = 0;
data->first = true;
data->toggle = !data->toggle;
data->state = STATE_HEADER_SPACE;
return 0;
case STATE_HEADER_SPACE:
if (ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_HEADER_SPACE, JVC_UNIT / 2))
break;
data->state = STATE_BIT_PULSE;
return 0;
case STATE_BIT_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_BIT_PULSE, JVC_UNIT / 2))
break;
data->state = STATE_BIT_SPACE;
return 0;
case STATE_BIT_SPACE:
if (ev.pulse)
break;
data->bits <<= 1;
if (eq_margin(ev.duration, JVC_BIT_1_SPACE, JVC_UNIT / 2)) {
data->bits |= 1;
decrease_duration(&ev, JVC_BIT_1_SPACE);
} else if (eq_margin(ev.duration, JVC_BIT_0_SPACE, JVC_UNIT / 2))
decrease_duration(&ev, JVC_BIT_0_SPACE);
else
break;
data->count++;
if (data->count == JVC_NBITS)
data->state = STATE_TRAILER_PULSE;
else
data->state = STATE_BIT_PULSE;
return 0;
case STATE_TRAILER_PULSE:
if (!ev.pulse)
break;
if (!eq_margin(ev.duration, JVC_TRAILER_PULSE, JVC_UNIT / 2))
break;
data->state = STATE_TRAILER_SPACE;
return 0;
case STATE_TRAILER_SPACE:
if (ev.pulse)
break;
if (!geq_margin(ev.duration, JVC_TRAILER_SPACE, JVC_UNIT / 2))
break;
if (data->first) {
u32 scancode;
scancode = (bitrev8((data->bits >> 8) & 0xff) << 8) |
(bitrev8((data->bits >> 0) & 0xff) << 0);
IR_dprintk(1, "JVC scancode 0x%04x\n", scancode);
rc_keydown(dev, scancode, data->toggle);
data->first = false;
data->old_bits = data->bits;
} else if (data->bits == data->old_bits) {
IR_dprintk(1, "JVC repeat\n");
rc_repeat(dev);
} else {
IR_dprintk(1, "JVC invalid repeat msg\n");
break;
}
data->count = 0;
data->state = STATE_CHECK_REPEAT;
return 0;
case STATE_CHECK_REPEAT:
if (!ev.pulse)
break;
if (eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
data->state = STATE_INACTIVE;
else
data->state = STATE_BIT_PULSE;
goto again;
}
out:
IR_dprintk(1, "JVC decode failed at state %d (%uus %s)\n",
data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
static struct ir_raw_handler jvc_handler = {
.protocols = RC_TYPE_JVC,
.decode = ir_jvc_decode,
};
static int __init ir_jvc_decode_init(void)
{
ir_raw_handler_register(&jvc_handler);
printk(KERN_INFO "IR JVC protocol handler initialized\n");
return 0;
}
static void __exit ir_jvc_decode_exit(void)
{
ir_raw_handler_unregister(&jvc_handler);
}
module_init(ir_jvc_decode_init);
module_exit(ir_jvc_decode_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_DESCRIPTION("JVC IR protocol decoder");
| gpl-2.0 |
hustard/h2fs | sound/soc/samsung/s3c24xx_uda134x.c | 3084 | 9260 | /*
* Modifications by Christian Pellegrin <chripell@evolware.org>
*
* s3c24xx_uda134x.c -- S3C24XX_UDA134X ALSA SoC Audio board driver
*
* Copyright 2007 Dension Audio Systems Ltd.
* Author: Zoltan Devai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/s3c24xx_uda134x.h>
#include "regs-iis.h"
#include "s3c24xx-i2s.h"
/* #define ENFORCE_RATES 1 */
/*
Unfortunately the S3C24XX in master mode has a limited capacity of
generating the clock for the codec. If you define this only rates
that are really available will be enforced. But be careful, most
 user level applications just want the usual sampling frequencies (8,
11.025, 22.050, 44.1 kHz) and anyway resampling is a costly
operation for embedded systems. So if you aren't very lucky or your
hardware engineer wasn't very forward-looking it's better to leave
this undefined. If you do so an approximate value for the requested
 sampling rate in the range -/+ 5% will be chosen. If this is not
possible an error will be returned.
*/
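/*
 * Illustrative numbers for the -/+ 5% rule above (the clock values
 * are made up): if the closest reachable rate for a requested
 * 44100 Hz stream is 43500 Hz, the error is 600 * 100 / 44100 = 1%,
 * which is accepted; a best match of 40000 Hz gives roughly 9% and
 * hw_params() rejects it with -EINVAL.
 */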
static struct clk *xtal;
static struct clk *pclk;
/* this is need because we don't have a place where to keep the
* pointers to the clocks in each substream. We get the clocks only
* when we are actually using them so we don't block stuff like
* frequency change or oscillator power-off */
static int clk_users;
static DEFINE_MUTEX(clk_lock);
static unsigned int rates[33 * 2];
#ifdef ENFORCE_RATES
static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
#endif
static struct platform_device *s3c24xx_uda134x_snd_device;
static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
{
int ret = 0;
#ifdef ENFORCE_RATES
struct snd_pcm_runtime *runtime = substream->runtime;
#endif
mutex_lock(&clk_lock);
pr_debug("%s %d\n", __func__, clk_users);
if (clk_users == 0) {
xtal = clk_get(&s3c24xx_uda134x_snd_device->dev, "xtal");
if (IS_ERR(xtal)) {
printk(KERN_ERR "%s cannot get xtal\n", __func__);
ret = PTR_ERR(xtal);
} else {
pclk = clk_get(&s3c24xx_uda134x_snd_device->dev,
"pclk");
if (IS_ERR(pclk)) {
printk(KERN_ERR "%s cannot get pclk\n",
__func__);
clk_put(xtal);
ret = PTR_ERR(pclk);
}
}
if (!ret) {
int i, j;
for (i = 0; i < 2; i++) {
int fs = i ? 256 : 384;
rates[i*33] = clk_get_rate(xtal) / fs;
for (j = 1; j < 33; j++)
rates[i*33 + j] = clk_get_rate(pclk) /
(j * fs);
}
}
}
clk_users += 1;
mutex_unlock(&clk_lock);
if (!ret) {
#ifdef ENFORCE_RATES
ret = snd_pcm_hw_constraint_list(runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_rates);
if (ret < 0)
printk(KERN_ERR "%s cannot set constraints\n",
__func__);
#endif
}
return ret;
}
static void s3c24xx_uda134x_shutdown(struct snd_pcm_substream *substream)
{
mutex_lock(&clk_lock);
pr_debug("%s %d\n", __func__, clk_users);
clk_users -= 1;
if (clk_users == 0) {
clk_put(xtal);
xtal = NULL;
clk_put(pclk);
pclk = NULL;
}
mutex_unlock(&clk_lock);
}
static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int clk = 0;
int ret = 0;
int clk_source, fs_mode;
unsigned long rate = params_rate(params);
long err, cerr;
unsigned int div;
int i, bi;
err = 999999;
bi = 0;
for (i = 0; i < 2*33; i++) {
cerr = rates[i] - rate;
if (cerr < 0)
cerr = -cerr;
if (cerr < err) {
err = cerr;
bi = i;
}
}
if (bi / 33 == 1)
fs_mode = S3C2410_IISMOD_256FS;
else
fs_mode = S3C2410_IISMOD_384FS;
if (bi % 33 == 0) {
clk_source = S3C24XX_CLKSRC_MPLL;
div = 1;
} else {
clk_source = S3C24XX_CLKSRC_PCLK;
div = bi % 33;
}
pr_debug("%s desired rate %lu, %d\n", __func__, rate, bi);
clk = (fs_mode == S3C2410_IISMOD_384FS ? 384 : 256) * rate;
pr_debug("%s will use: %s %s %d sysclk %d err %ld\n", __func__,
fs_mode == S3C2410_IISMOD_384FS ? "384FS" : "256FS",
clk_source == S3C24XX_CLKSRC_MPLL ? "MPLLin" : "PCLK",
div, clk, err);
if ((err * 100 / rate) > 5) {
printk(KERN_ERR "S3C24XX_UDA134X: effective frequency "
"too different from desired (%ld%%)\n",
err * 100 / rate);
return -EINVAL;
}
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source , clk,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, fs_mode);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
S3C2410_IISMOD_32FS);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
S3C24XX_PRESCALE(div, div));
if (ret < 0)
return ret;
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
return 0;
}
static struct snd_soc_ops s3c24xx_uda134x_ops = {
.startup = s3c24xx_uda134x_startup,
.shutdown = s3c24xx_uda134x_shutdown,
.hw_params = s3c24xx_uda134x_hw_params,
};
static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
.name = "UDA134X",
.stream_name = "UDA134X",
.codec_name = "uda134x-codec",
.codec_dai_name = "uda134x-hifi",
.cpu_dai_name = "s3c24xx-iis",
.ops = &s3c24xx_uda134x_ops,
.platform_name = "s3c24xx-iis",
};
static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
.name = "S3C24XX_UDA134X",
.owner = THIS_MODULE,
.dai_link = &s3c24xx_uda134x_dai_link,
.num_links = 1,
};
static struct s3c24xx_uda134x_platform_data *s3c24xx_uda134x_l3_pins;
static void setdat(int v)
{
gpio_set_value(s3c24xx_uda134x_l3_pins->l3_data, v > 0);
}
static void setclk(int v)
{
gpio_set_value(s3c24xx_uda134x_l3_pins->l3_clk, v > 0);
}
static void setmode(int v)
{
gpio_set_value(s3c24xx_uda134x_l3_pins->l3_mode, v > 0);
}
/* FIXME - This must be codec platform data but in which board file ?? */
static struct uda134x_platform_data s3c24xx_uda134x = {
.l3 = {
.setdat = setdat,
.setclk = setclk,
.setmode = setmode,
.data_hold = 1,
.data_setup = 1,
.clock_high = 1,
.mode_hold = 1,
.mode = 1,
.mode_setup = 1,
},
};
static int s3c24xx_uda134x_setup_pin(int pin, char *fun)
{
if (gpio_request(pin, "s3c24xx_uda134x") < 0) {
printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
"l3 %s pin already in use", fun);
return -EBUSY;
}
gpio_direction_output(pin, 0);
return 0;
}
static int s3c24xx_uda134x_probe(struct platform_device *pdev)
{
int ret;
printk(KERN_INFO "S3C24XX_UDA134X SoC Audio driver\n");
s3c24xx_uda134x_l3_pins = pdev->dev.platform_data;
if (s3c24xx_uda134x_l3_pins == NULL) {
printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
"unable to find platform data\n");
return -ENODEV;
}
s3c24xx_uda134x.power = s3c24xx_uda134x_l3_pins->power;
s3c24xx_uda134x.model = s3c24xx_uda134x_l3_pins->model;
if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_data,
"data") < 0)
return -EBUSY;
if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_clk,
"clk") < 0) {
gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
return -EBUSY;
}
if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_mode,
"mode") < 0) {
gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
return -EBUSY;
}
s3c24xx_uda134x_snd_device = platform_device_alloc("soc-audio", -1);
if (!s3c24xx_uda134x_snd_device) {
printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
"Unable to register\n");
return -ENOMEM;
}
platform_set_drvdata(s3c24xx_uda134x_snd_device,
&snd_soc_s3c24xx_uda134x);
platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x));
ret = platform_device_add(s3c24xx_uda134x_snd_device);
if (ret) {
printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");
platform_device_put(s3c24xx_uda134x_snd_device);
}
return ret;
}
static int s3c24xx_uda134x_remove(struct platform_device *pdev)
{
platform_device_unregister(s3c24xx_uda134x_snd_device);
gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
gpio_free(s3c24xx_uda134x_l3_pins->l3_mode);
return 0;
}
static struct platform_driver s3c24xx_uda134x_driver = {
.probe = s3c24xx_uda134x_probe,
.remove = s3c24xx_uda134x_remove,
.driver = {
.name = "s3c24xx_uda134x",
.owner = THIS_MODULE,
},
};
module_platform_driver(s3c24xx_uda134x_driver);
MODULE_AUTHOR("Zoltan Devai, Christian Pellegrin <chripell@evolware.org>");
MODULE_DESCRIPTION("S3C24XX_UDA134X ALSA SoC audio driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
faux123/Nexus-grouper | arch/powerpc/kernel/ftrace.c | 4364 | 14632 | /*
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
*
* Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
*
* Added function graph tracer code, taken from x86 that was written
* by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
*
*/
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
unsigned int op;
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
return op;
}
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
unsigned int replaced;
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
* as well as code changing. We do this by using the
* probe_kernel_* functions.
*
* No real locking needed, this code is run through
* kstop_machine, or before SMP starts.
*/
/* read the text we want to modify */
if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* Make sure it is what we expect it to be */
if (replaced != old)
return -EINVAL;
/* replace the text with the new text */
if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE))
return -EPERM;
flush_icache_range(ip, ip + 8);
return 0;
}
/*
* Helper functions that are the same for both PPC64 and PPC32.
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
/* use the create_branch to verify that this offset can be branched */
return create_branch((unsigned int *)ip, addr, 0);
}
#ifdef CONFIG_MODULES
static int is_bl_op(unsigned int op)
{
return (op & 0xfc000003) == 0x48000001;
}
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
int offset;
offset = (op & 0x03fffffc);
/* make it signed */
if (offset & 0x02000000)
offset |= 0xfe000000;
return ip + (long)offset;
}
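/*
* Editor's note (illustrative, not part of the original file): the
* masking above pulls the 26-bit LI field out of a branch instruction
* and sign-extends it. For the "b +8" opcode (0x48000008) written later
* in this file, offset = 0x48000008 & 0x03fffffc = 0x8, bit 0x02000000
* is clear so no sign extension is needed, and the target is ip + 8.
*/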
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int op;
unsigned int jmp[5];
unsigned long ptr;
unsigned long ip = rec->ip;
unsigned long tramp;
int offset;
/* read where this goes */
if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
return -EFAULT;
/* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
return -EINVAL;
}
/* lets find where the pointer goes */
tramp = find_bl_target(ip, op);
/*
* On PPC64 the trampoline looks like:
* 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high>
* 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low>
* Where the bytes 2,3,6 and 7 make up the 32bit offset
* to the TOC entry that holds the pointer to jump to.
* 0xf8, 0x41, 0x00, 0x28, std r2,40(r1)
* 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12)
* The actual address is 32 bytes from the offset
* into the TOC.
* 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12)
*/
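/*
* Worked example (editor's note, illustrative values only): if the
* trampoline reads addis r12,r2,0x1 / addi r12,r12,0x5678, i.e.
* jmp[0] = 0x3d820001 and jmp[1] = 0x398c5678, the offset computed
* below is (0x0001 << 16) + 0x5678 = 0x15678, and the pointer that is
* compared against ppc_function_entry(addr) is loaded from
* mod->arch.toc + 0x15678 + 32.
*/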
pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
pr_devel(" %08x %08x", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
((jmp[1] & 0xffff0000) != 0x398c0000) ||
(jmp[2] != 0xf8410028) ||
(jmp[3] != 0xe96c0020) ||
(jmp[4] != 0xe84c0028)) {
printk(KERN_ERR "Not a trampoline\n");
return -EINVAL;
}
/* The bottom half is sign extended */
offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
(int)((short)jmp[1]);
pr_devel(" %x ", offset);
/* get the address this jumps to */
tramp = mod->arch.toc + offset + 32;
pr_devel("toc: %lx", tramp);
if (probe_kernel_read(jmp, (void *)tramp, 8)) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
/* This should match what was called */
if (ptr != ppc_function_entry((void *)addr)) {
printk(KERN_ERR "addr does not match %lx\n", ptr);
return -EINVAL;
}
/*
* We want to nop the line, but the next line is
* 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1)
* This needs to be turned to a nop too.
*/
if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
return -EFAULT;
if (op != 0xe8410028) {
printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
return -EINVAL;
}
/*
* Milton Miller pointed out that we can not blindly do nops.
* If a task was preempted when calling a trace function,
* the nops will remove the way to restore the TOC in r2
* and the r2 TOC will get corrupted.
*/
/*
* Replace:
* bl <tramp> <==== will be replaced with "b 1f"
* ld r2,40(r1)
* 1:
*/
op = 0x48000008; /* b +8 */
if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
return -EPERM;
flush_icache_range(ip, ip + 8);
return 0;
}
#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int op;
unsigned int jmp[4];
unsigned long ip = rec->ip;
unsigned long tramp;
if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
return -EINVAL;
}
/* lets find where the pointer goes */
tramp = find_bl_target(ip, op);
/*
* On PPC32 the trampoline looks like:
* 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
* 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
* 0x7d, 0x69, 0x03, 0xa6 mtctr r11
* 0x4e, 0x80, 0x04, 0x20 bctr
*/
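/*
* Worked example (editor's note, illustrative values only): for
* lis r11,0xc012 / addi r11,r11,-0x5680, i.e. jmp[0] = 0x3d60c012 and
* jmp[1] = 0x396ba980, the code below rebuilds (0xc012 << 16) | 0xa980
* = 0xc012a980, then subtracts 0x10000 because bit 15 of the low half
* is set, giving the real target 0xc011a980.
*/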
pr_devel("ip:%lx jumps to %lx", ip, tramp);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
pr_devel(" %08x %08x ", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
((jmp[1] & 0xffff0000) != 0x396b0000) ||
(jmp[2] != 0x7d6903a6) ||
(jmp[3] != 0x4e800420)) {
printk(KERN_ERR "Not a trampoline\n");
return -EINVAL;
}
tramp = (jmp[1] & 0xffff) |
((jmp[0] & 0xffff) << 16);
if (tramp & 0x8000)
tramp -= 0x10000;
pr_devel(" %lx ", tramp);
if (tramp != addr) {
printk(KERN_ERR
"Trampoline location %08lx does not match addr\n",
tramp);
return -EINVAL;
}
op = PPC_INST_NOP;
if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
return -EPERM;
flush_icache_range(ip, ip + 8);
return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
unsigned int old, new;
/*
* If the calling address is more than 24 bits away,
* then we had to use a trampoline to make the call.
* Otherwise just update the call site.
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
new = PPC_INST_NOP;
return ftrace_modify_code(ip, old, new);
}
#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* We should either already have a pointer to the module
* or it has been passed in.
*/
if (!rec->arch.mod) {
if (!mod) {
printk(KERN_ERR "No module loaded addr=%lx\n",
addr);
return -EFAULT;
}
rec->arch.mod = mod;
} else if (mod) {
if (mod != rec->arch.mod) {
printk(KERN_ERR
"Record mod %p not equal to passed in mod %p\n",
rec->arch.mod, mod);
return -EINVAL;
}
/* nothing to do if mod == rec->arch.mod */
} else
mod = rec->arch.mod;
return __ftrace_make_nop(mod, rec, addr);
#else
/* We should not get here without modules */
return -EINVAL;
#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int op[2];
unsigned long ip = rec->ip;
/* read where this goes */
if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
return -EFAULT;
/*
* It should be pointing to two nops or
* b +8; ld r2,40(r1)
*/
if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
return -EINVAL;
}
/* If we never set up a trampoline to ftrace_caller, then bail */
if (!rec->arch.mod->arch.tramp) {
printk(KERN_ERR "No ftrace trampoline\n");
return -EINVAL;
}
/* create the branch to the trampoline */
op[0] = create_branch((unsigned int *)ip,
rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
if (!op[0]) {
printk(KERN_ERR "REL24 out of range!\n");
return -EINVAL;
}
/* ld r2,40(r1) */
op[1] = 0xe8410028;
pr_devel("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
return -EPERM;
flush_icache_range(ip, ip + 8);
return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int op;
unsigned long ip = rec->ip;
/* read where this goes */
if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* It should be pointing to a nop */
if (op != PPC_INST_NOP) {
printk(KERN_ERR "Expected NOP but have %x\n", op);
return -EINVAL;
}
/* If we never set up a trampoline to ftrace_caller, then bail */
if (!rec->arch.mod->arch.tramp) {
printk(KERN_ERR "No ftrace trampoline\n");
return -EINVAL;
}
/* create the branch to the trampoline */
op = create_branch((unsigned int *)ip,
rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
if (!op) {
printk(KERN_ERR "REL24 out of range!\n");
return -EINVAL;
}
pr_devel("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
return -EPERM;
flush_icache_range(ip, ip + 8);
return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
unsigned int old, new;
/*
* If the calling address is more than 24 bits away,
* then we had to use a trampoline to make the call.
* Otherwise just update the call site.
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
old = PPC_INST_NOP;
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
}
#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* Since we are converting from a nop, it had better
* already have a module defined.
*/
if (!rec->arch.mod) {
printk(KERN_ERR "No module loaded\n");
return -EINVAL;
}
return __ftrace_make_call(rec, addr);
#else
/* We should not get here without modules */
return -EINVAL;
#endif /* CONFIG_MODULES */
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
unsigned int old, new;
int ret;
old = *(unsigned int *)&ftrace_call;
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
return ret;
}
int __init ftrace_dyn_arch_init(void *data)
{
/* caller expects data to be zero */
unsigned long *p = data;
*p = 0;
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
unsigned int old, new;
old = ftrace_call_replace(ip, stub, 0);
new = ftrace_call_replace(ip, addr, 0);
return ftrace_modify_code(ip, old, new);
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
unsigned int old, new;
old = ftrace_call_replace(ip, addr, 0);
new = ftrace_call_replace(ip, stub, 0);
return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
int faulted;
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
#ifdef CONFIG_PPC64
/* non core kernel code needs to save and restore the TOC */
if (REGION_ID(self_addr) != KERNEL_REGION_ID)
return_hooker = (unsigned long)&mod_return_to_handler;
#endif
return_hooker = ppc_function_entry((void *)return_hooker);
/*
* Protect against fault, even if it shouldn't
* happen. This tool is too intrusive to
* skip such a protection.
*/
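/*
* Editor's note: the __ex_table entries below pair the two faulting
* accesses (labels 1 and 2) with the fixup at label 4, which sets
* "faulted" to 1 and resumes at label 3, so a bad parent pointer ends
* in ftrace_graph_stop() + WARN_ON() instead of an oops.
*/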
asm volatile(
"1: " PPC_LL "%[old], 0(%[parent])\n"
"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
" li %[faulted], 0\n"
"3:\n"
".section .fixup, \"ax\"\n"
"4: li %[faulted], 1\n"
" b 3b\n"
".previous\n"
".section __ex_table,\"a\"\n"
PPC_LONG_ALIGN "\n"
PPC_LONG "1b,4b\n"
PPC_LONG "2b,4b\n"
".previous"
: [old] "=&r" (old), [faulted] "=r" (faulted)
: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);
if (unlikely(faulted)) {
ftrace_graph_stop();
WARN_ON(1);
return;
}
if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
*parent = old;
return;
}
trace.func = self_addr;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
*parent = old;
}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
| gpl-2.0 |
deepsrd/android_kernel_nx507j | drivers/media/video/cx23885/cx23885-cards.c | 4876 | 45692 | /*
* Driver for the Conexant CX23885 PCIe bridge
*
* Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <media/cx25840.h>
#include <linux/firmware.h>
#include <misc/altera.h>
#include "cx23885.h"
#include "tuner-xc2028.h"
#include "netup-eeprom.h"
#include "netup-init.h"
#include "altera-ci.h"
#include "xc4000.h"
#include "xc5000.h"
#include "cx23888-ir.h"
static unsigned int netup_card_rev = 1;
module_param(netup_card_rev, int, 0644);
MODULE_PARM_DESC(netup_card_rev,
"NetUP Dual DVB-T/C CI card revision");
static unsigned int enable_885_ir;
module_param(enable_885_ir, int, 0644);
MODULE_PARM_DESC(enable_885_ir,
"Enable integrated IR controller for supported\n"
"\t\t CX2388[57] boards that are wired for it:\n"
"\t\t\tHVR-1250 (reported safe)\n"
"\t\t\tTeVii S470 (reported unsafe)\n"
"\t\t This can cause an interrupt storm with some cards.\n"
"\t\t Default: 0 [Disabled]");
/* ------------------------------------------------------------------ */
/* board config info */
struct cx23885_board cx23885_boards[] = {
[CX23885_BOARD_UNKNOWN] = {
.name = "UNKNOWN/GENERIC",
/* Ensure safe default for unknown boards */
.clk_freq = 0,
.input = {{
.type = CX23885_VMUX_COMPOSITE1,
.vmux = 0,
}, {
.type = CX23885_VMUX_COMPOSITE2,
.vmux = 1,
}, {
.type = CX23885_VMUX_COMPOSITE3,
.vmux = 2,
}, {
.type = CX23885_VMUX_COMPOSITE4,
.vmux = 3,
} },
},
[CX23885_BOARD_HAUPPAUGE_HVR1800lp] = {
.name = "Hauppauge WinTV-HVR1800lp",
.portc = CX23885_MPEG_DVB,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xff00,
}, {
.type = CX23885_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0xff01,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xff02,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xff02,
} },
},
[CX23885_BOARD_HAUPPAUGE_HVR1800] = {
.name = "Hauppauge WinTV-HVR1800",
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_ENCODER,
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_PHILIPS_TDA8290,
.tuner_addr = 0x42, /* 0x84 >> 1 */
.tuner_bus = 1,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN5_CH2 |
CX25840_VIN2_CH1,
.amux = CX25840_AUDIO8,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
CX25840_VIN6_CH1,
.amux = CX25840_AUDIO7,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
CX25840_VIN8_CH1 |
CX25840_SVIDEO_ON,
.amux = CX25840_AUDIO7,
.gpio0 = 0,
} },
},
[CX23885_BOARD_HAUPPAUGE_HVR1250] = {
.name = "Hauppauge WinTV-HVR1250",
.portc = CX23885_MPEG_DVB,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xff00,
}, {
.type = CX23885_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0xff01,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xff02,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xff02,
} },
},
[CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP] = {
.name = "DViCO FusionHDTV5 Express",
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1500Q] = {
.name = "Hauppauge WinTV-HVR1500Q",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1500] = {
.name = "Hauppauge WinTV-HVR1500",
.porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61, /* 0xc2 >> 1 */
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN5_CH2 |
CX25840_VIN2_CH1,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
CX25840_VIN6_CH1,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
CX25840_VIN8_CH1 |
CX25840_SVIDEO_ON,
.gpio0 = 0,
} },
},
[CX23885_BOARD_HAUPPAUGE_HVR1200] = {
.name = "Hauppauge WinTV-HVR1200",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1700] = {
.name = "Hauppauge WinTV-HVR1700",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1400] = {
.name = "Hauppauge WinTV-HVR1400",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP] = {
.name = "DViCO FusionHDTV7 Dual Express",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP] = {
.name = "DViCO FusionHDTV DVB-T Dual Express",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H] = {
.name = "Leadtek Winfast PxDVR3200 H",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = {
.name = "Leadtek Winfast PxDVR3200 H XC4000",
.porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_XC4000,
.tuner_addr = 0x61,
.radio_type = UNSET,
.radio_addr = ADDR_UNSET,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN2_CH1 |
CX25840_VIN5_CH2 |
CX25840_NONE0_CH3,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_COMPOSITE1,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_SVIDEO_LUMA3 |
CX25840_SVIDEO_CHROMA4,
}, {
.type = CX23885_VMUX_COMPONENT,
.vmux = CX25840_VIN7_CH1 |
CX25840_VIN6_CH2 |
CX25840_VIN8_CH3 |
CX25840_COMPONENT_ON,
} },
},
[CX23885_BOARD_COMPRO_VIDEOMATE_E650F] = {
.name = "Compro VideoMate E650F",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_TBS_6920] = {
.name = "TurboSight TBS 6920",
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_TEVII_S470] = {
.name = "TeVii S470",
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_DVBWORLD_2005] = {
.name = "DVBWorld DVB-S2 2005",
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_NETUP_DUAL_DVBS2_CI] = {
.ci_type = 1,
.name = "NetUP Dual DVB-S2 CI",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1270] = {
.name = "Hauppauge WinTV-HVR1270",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1275] = {
.name = "Hauppauge WinTV-HVR1275",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1255] = {
.name = "Hauppauge WinTV-HVR1255",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1210] = {
.name = "Hauppauge WinTV-HVR1210",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_MYGICA_X8506] = {
.name = "Mygica X8506 DMB-TH",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_bus = 1,
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.input = {
{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_COMPOSITE2,
},
{
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_COMPOSITE8,
},
{
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_SVIDEO_LUMA3 |
CX25840_SVIDEO_CHROMA4,
},
{
.type = CX23885_VMUX_COMPONENT,
.vmux = CX25840_COMPONENT_ON |
CX25840_VIN1_CH1 |
CX25840_VIN6_CH2 |
CX25840_VIN7_CH3,
},
},
},
[CX23885_BOARD_MAGICPRO_PROHDTVE2] = {
.name = "Magic-Pro ProHDTV Extreme 2",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_bus = 1,
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.input = {
{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_COMPOSITE2,
},
{
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_COMPOSITE8,
},
{
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_SVIDEO_LUMA3 |
CX25840_SVIDEO_CHROMA4,
},
{
.type = CX23885_VMUX_COMPONENT,
.vmux = CX25840_COMPONENT_ON |
CX25840_VIN1_CH1 |
CX25840_VIN6_CH2 |
CX25840_VIN7_CH3,
},
},
},
[CX23885_BOARD_HAUPPAUGE_HVR1850] = {
.name = "Hauppauge WinTV-HVR1850",
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_ENCODER,
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_ABSENT,
.tuner_addr = 0x42, /* 0x84 >> 1 */
.force_bff = 1,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN5_CH2 |
CX25840_VIN2_CH1 |
CX25840_DIF_ON,
.amux = CX25840_AUDIO8,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
CX25840_VIN6_CH1,
.amux = CX25840_AUDIO7,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_VIN7_CH3 |
CX25840_VIN4_CH2 |
CX25840_VIN8_CH1 |
CX25840_SVIDEO_ON,
.amux = CX25840_AUDIO7,
} },
},
[CX23885_BOARD_COMPRO_VIDEOMATE_E800] = {
.name = "Compro VideoMate E800",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR1290] = {
.name = "Hauppauge WinTV-HVR1290",
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_MYGICA_X8558PRO] = {
.name = "Mygica X8558 PRO DMB-TH",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
[CX23885_BOARD_LEADTEK_WINFAST_PXTV1200] = {
.name = "LEADTEK WinFast PxTV1200",
.porta = CX23885_ANALOG_VIDEO,
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
.tuner_bus = 1,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN2_CH1 |
CX25840_VIN5_CH2 |
CX25840_NONE0_CH3,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_COMPOSITE1,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_SVIDEO_LUMA3 |
CX25840_SVIDEO_CHROMA4,
}, {
.type = CX23885_VMUX_COMPONENT,
.vmux = CX25840_VIN7_CH1 |
CX25840_VIN6_CH2 |
CX25840_VIN8_CH3 |
CX25840_COMPONENT_ON,
} },
},
[CX23885_BOARD_GOTVIEW_X5_3D_HYBRID] = {
.name = "GoTView X5 3D Hybrid",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x64,
.tuner_bus = 1,
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN2_CH1 |
CX25840_VIN5_CH2,
.gpio0 = 0x02,
}, {
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX23885_VMUX_COMPOSITE1,
}, {
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_SVIDEO_LUMA3 |
CX25840_SVIDEO_CHROMA4,
} },
},
[CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF] = {
.ci_type = 2,
.name = "NetUP Dual DVB-T/C-CI RF",
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
.num_fds_portb = 2,
.num_fds_portc = 2,
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x64,
.input = { {
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_COMPOSITE1,
} },
},
[CX23885_BOARD_MPX885] = {
.name = "MPX-885",
.porta = CX23885_ANALOG_VIDEO,
.input = {{
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_COMPOSITE1,
.amux = CX25840_AUDIO6,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_COMPOSITE2,
.vmux = CX25840_COMPOSITE2,
.amux = CX25840_AUDIO6,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_COMPOSITE3,
.vmux = CX25840_COMPOSITE3,
.amux = CX25840_AUDIO7,
.gpio0 = 0,
}, {
.type = CX23885_VMUX_COMPOSITE4,
.vmux = CX25840_COMPOSITE4,
.amux = CX25840_AUDIO7,
.gpio0 = 0,
} },
},
[CX23885_BOARD_MYGICA_X8507] = {
.name = "Mygica X8507",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_bus = 1,
.porta = CX23885_ANALOG_VIDEO,
.input = {
{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_COMPOSITE2,
.amux = CX25840_AUDIO8,
},
{
.type = CX23885_VMUX_COMPOSITE1,
.vmux = CX25840_COMPOSITE8,
},
{
.type = CX23885_VMUX_SVIDEO,
.vmux = CX25840_SVIDEO_LUMA3 |
CX25840_SVIDEO_CHROMA4,
},
{
.type = CX23885_VMUX_COMPONENT,
.vmux = CX25840_COMPONENT_ON |
CX25840_VIN1_CH1 |
CX25840_VIN6_CH2 |
CX25840_VIN7_CH3,
},
},
},
[CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL] = {
.name = "TerraTec Cinergy T PCIe Dual",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
}
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
/* ------------------------------------------------------------------ */
/* PCI subsystem IDs */
struct cx23885_subid cx23885_subids[] = {
{
.subvendor = 0x0070,
.subdevice = 0x3400,
.card = CX23885_BOARD_UNKNOWN,
}, {
.subvendor = 0x0070,
.subdevice = 0x7600,
.card = CX23885_BOARD_HAUPPAUGE_HVR1800lp,
}, {
.subvendor = 0x0070,
.subdevice = 0x7800,
.card = CX23885_BOARD_HAUPPAUGE_HVR1800,
}, {
.subvendor = 0x0070,
.subdevice = 0x7801,
.card = CX23885_BOARD_HAUPPAUGE_HVR1800,
}, {
.subvendor = 0x0070,
.subdevice = 0x7809,
.card = CX23885_BOARD_HAUPPAUGE_HVR1800,
}, {
.subvendor = 0x0070,
.subdevice = 0x7911,
.card = CX23885_BOARD_HAUPPAUGE_HVR1250,
}, {
.subvendor = 0x18ac,
.subdevice = 0xd500,
.card = CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP,
}, {
.subvendor = 0x0070,
.subdevice = 0x7790,
.card = CX23885_BOARD_HAUPPAUGE_HVR1500Q,
}, {
.subvendor = 0x0070,
.subdevice = 0x7797,
.card = CX23885_BOARD_HAUPPAUGE_HVR1500Q,
}, {
.subvendor = 0x0070,
.subdevice = 0x7710,
.card = CX23885_BOARD_HAUPPAUGE_HVR1500,
}, {
.subvendor = 0x0070,
.subdevice = 0x7717,
.card = CX23885_BOARD_HAUPPAUGE_HVR1500,
}, {
.subvendor = 0x0070,
.subdevice = 0x71d1,
.card = CX23885_BOARD_HAUPPAUGE_HVR1200,
}, {
.subvendor = 0x0070,
.subdevice = 0x71d3,
.card = CX23885_BOARD_HAUPPAUGE_HVR1200,
}, {
.subvendor = 0x0070,
.subdevice = 0x8101,
.card = CX23885_BOARD_HAUPPAUGE_HVR1700,
}, {
.subvendor = 0x0070,
.subdevice = 0x8010,
.card = CX23885_BOARD_HAUPPAUGE_HVR1400,
}, {
.subvendor = 0x18ac,
.subdevice = 0xd618,
.card = CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP,
}, {
.subvendor = 0x18ac,
.subdevice = 0xdb78,
.card = CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP,
}, {
.subvendor = 0x107d,
.subdevice = 0x6681,
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
}, {
.subvendor = 0x107d,
.subdevice = 0x6f39,
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000,
}, {
.subvendor = 0x185b,
.subdevice = 0xe800,
.card = CX23885_BOARD_COMPRO_VIDEOMATE_E650F,
}, {
.subvendor = 0x6920,
.subdevice = 0x8888,
.card = CX23885_BOARD_TBS_6920,
}, {
.subvendor = 0xd470,
.subdevice = 0x9022,
.card = CX23885_BOARD_TEVII_S470,
}, {
.subvendor = 0x0001,
.subdevice = 0x2005,
.card = CX23885_BOARD_DVBWORLD_2005,
}, {
.subvendor = 0x1b55,
.subdevice = 0x2a2c,
.card = CX23885_BOARD_NETUP_DUAL_DVBS2_CI,
}, {
.subvendor = 0x0070,
.subdevice = 0x2211,
.card = CX23885_BOARD_HAUPPAUGE_HVR1270,
}, {
.subvendor = 0x0070,
.subdevice = 0x2215,
.card = CX23885_BOARD_HAUPPAUGE_HVR1275,
}, {
.subvendor = 0x0070,
.subdevice = 0x221d,
.card = CX23885_BOARD_HAUPPAUGE_HVR1275,
}, {
.subvendor = 0x0070,
.subdevice = 0x2251,
.card = CX23885_BOARD_HAUPPAUGE_HVR1255,
}, {
.subvendor = 0x0070,
.subdevice = 0x2259,
.card = CX23885_BOARD_HAUPPAUGE_HVR1255,
}, {
.subvendor = 0x0070,
.subdevice = 0x2291,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210,
}, {
.subvendor = 0x0070,
.subdevice = 0x2295,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210,
}, {
.subvendor = 0x0070,
.subdevice = 0x2299,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210,
}, {
.subvendor = 0x0070,
.subdevice = 0x229d,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210, /* HVR1215 */
}, {
.subvendor = 0x0070,
.subdevice = 0x22f0,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210,
}, {
.subvendor = 0x0070,
.subdevice = 0x22f1,
.card = CX23885_BOARD_HAUPPAUGE_HVR1255,
}, {
.subvendor = 0x0070,
.subdevice = 0x22f2,
.card = CX23885_BOARD_HAUPPAUGE_HVR1275,
}, {
.subvendor = 0x0070,
.subdevice = 0x22f3,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210, /* HVR1215 */
}, {
.subvendor = 0x0070,
.subdevice = 0x22f4,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210,
}, {
.subvendor = 0x0070,
.subdevice = 0x22f5,
.card = CX23885_BOARD_HAUPPAUGE_HVR1210, /* HVR1215 */
}, {
.subvendor = 0x14f1,
.subdevice = 0x8651,
.card = CX23885_BOARD_MYGICA_X8506,
}, {
.subvendor = 0x14f1,
.subdevice = 0x8657,
.card = CX23885_BOARD_MAGICPRO_PROHDTVE2,
}, {
.subvendor = 0x0070,
.subdevice = 0x8541,
.card = CX23885_BOARD_HAUPPAUGE_HVR1850,
}, {
.subvendor = 0x1858,
.subdevice = 0xe800,
.card = CX23885_BOARD_COMPRO_VIDEOMATE_E800,
}, {
.subvendor = 0x0070,
.subdevice = 0x8551,
.card = CX23885_BOARD_HAUPPAUGE_HVR1290,
}, {
.subvendor = 0x14f1,
.subdevice = 0x8578,
.card = CX23885_BOARD_MYGICA_X8558PRO,
}, {
.subvendor = 0x107d,
.subdevice = 0x6f22,
.card = CX23885_BOARD_LEADTEK_WINFAST_PXTV1200,
}, {
.subvendor = 0x5654,
.subdevice = 0x2390,
.card = CX23885_BOARD_GOTVIEW_X5_3D_HYBRID,
}, {
.subvendor = 0x1b55,
.subdevice = 0xe2e4,
.card = CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF,
}, {
.subvendor = 0x14f1,
.subdevice = 0x8502,
.card = CX23885_BOARD_MYGICA_X8507,
}, {
.subvendor = 0x153b,
.subdevice = 0x117e,
.card = CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
void cx23885_card_list(struct cx23885_dev *dev)
{
int i;
if (0 == dev->pci->subsystem_vendor &&
0 == dev->pci->subsystem_device) {
printk(KERN_INFO
"%s: Board has no valid PCIe Subsystem ID and can't\n"
"%s: be autodetected. Pass card=<n> insmod option\n"
"%s: to workaround that. Redirect complaints to the\n"
"%s: vendor of the TV card. Best regards,\n"
"%s: -- tux\n",
dev->name, dev->name, dev->name, dev->name, dev->name);
} else {
printk(KERN_INFO
"%s: Your board isn't known (yet) to the driver.\n"
"%s: Try to pick one of the existing card configs via\n"
"%s: card=<n> insmod option. Updating to the latest\n"
"%s: version might help as well.\n",
dev->name, dev->name, dev->name, dev->name);
}
printk(KERN_INFO "%s: Here is a list of valid choices for the card=<n> insmod option:\n",
dev->name);
for (i = 0; i < cx23885_bcount; i++)
printk(KERN_INFO "%s: card=%d -> %s\n",
dev->name, i, cx23885_boards[i].name);
}
static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
{
struct tveeprom tv;
tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv,
eeprom_data);
/* Make sure we support the board model */
switch (tv.model) {
case 22001:
/* WinTV-HVR1270 (PCIe, Retail, half height)
* ATSC/QAM and basic analog, IR Blast */
case 22009:
/* WinTV-HVR1210 (PCIe, Retail, half height)
* DVB-T and basic analog, IR Blast */
case 22011:
/* WinTV-HVR1270 (PCIe, Retail, half height)
* ATSC/QAM and basic analog, IR Recv */
case 22019:
/* WinTV-HVR1210 (PCIe, Retail, half height)
* DVB-T and basic analog, IR Recv */
case 22021:
/* WinTV-HVR1275 (PCIe, Retail, half height)
* ATSC/QAM and basic analog, IR Recv */
case 22029:
/* WinTV-HVR1210 (PCIe, Retail, half height)
* DVB-T and basic analog, IR Recv */
case 22101:
/* WinTV-HVR1270 (PCIe, Retail, full height)
* ATSC/QAM and basic analog, IR Blast */
case 22109:
/* WinTV-HVR1210 (PCIe, Retail, full height)
* DVB-T and basic analog, IR Blast */
case 22111:
/* WinTV-HVR1270 (PCIe, Retail, full height)
* ATSC/QAM and basic analog, IR Recv */
case 22119:
/* WinTV-HVR1210 (PCIe, Retail, full height)
* DVB-T and basic analog, IR Recv */
case 22121:
/* WinTV-HVR1275 (PCIe, Retail, full height)
* ATSC/QAM and basic analog, IR Recv */
case 22129:
/* WinTV-HVR1210 (PCIe, Retail, full height)
* DVB-T and basic analog, IR Recv */
case 71009:
/* WinTV-HVR1200 (PCIe, Retail, full height)
* DVB-T and basic analog */
case 71359:
/* WinTV-HVR1200 (PCIe, OEM, half height)
* DVB-T and basic analog */
case 71439:
/* WinTV-HVR1200 (PCIe, OEM, half height)
* DVB-T and basic analog */
case 71449:
/* WinTV-HVR1200 (PCIe, OEM, full height)
* DVB-T and basic analog */
case 71939:
/* WinTV-HVR1200 (PCIe, OEM, half height)
* DVB-T and basic analog */
case 71949:
/* WinTV-HVR1200 (PCIe, OEM, full height)
* DVB-T and basic analog */
case 71959:
/* WinTV-HVR1200 (PCIe, OEM, full height)
* DVB-T and basic analog */
case 71979:
/* WinTV-HVR1200 (PCIe, OEM, half height)
* DVB-T and basic analog */
case 71999:
/* WinTV-HVR1200 (PCIe, OEM, full height)
* DVB-T and basic analog */
case 76601:
/* WinTV-HVR1800lp (PCIe, Retail, No IR, Dual
channel ATSC and MPEG2 HW Encoder */
case 77001:
/* WinTV-HVR1500 (Express Card, OEM, No IR, ATSC
and Basic analog */
case 77011:
/* WinTV-HVR1500 (Express Card, Retail, No IR, ATSC
and Basic analog */
case 77041:
/* WinTV-HVR1500Q (Express Card, OEM, No IR, ATSC/QAM
and Basic analog */
case 77051:
/* WinTV-HVR1500Q (Express Card, Retail, No IR, ATSC/QAM
and Basic analog */
case 78011:
/* WinTV-HVR1800 (PCIe, Retail, 3.5mm in, IR, No FM,
Dual channel ATSC and MPEG2 HW Encoder */
case 78501:
/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM,
Dual channel ATSC and MPEG2 HW Encoder */
case 78521:
/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, FM,
Dual channel ATSC and MPEG2 HW Encoder */
case 78531:
/* WinTV-HVR1800 (PCIe, OEM, RCA in, No IR, No FM,
Dual channel ATSC and MPEG2 HW Encoder */
case 78631:
/* WinTV-HVR1800 (PCIe, OEM, No IR, No FM,
Dual channel ATSC and MPEG2 HW Encoder */
case 79001:
/* WinTV-HVR1250 (PCIe, Retail, IR, full height,
ATSC and Basic analog */
case 79101:
/* WinTV-HVR1250 (PCIe, Retail, IR, half height,
ATSC and Basic analog */
case 79501:
/* WinTV-HVR1250 (PCIe, No IR, half height,
ATSC [at least] and Basic analog) */
case 79561:
/* WinTV-HVR1250 (PCIe, OEM, No IR, half height,
ATSC and Basic analog */
case 79571:
/* WinTV-HVR1250 (PCIe, OEM, No IR, full height,
ATSC and Basic analog */
case 79671:
/* WinTV-HVR1250 (PCIe, OEM, No IR, half height,
ATSC and Basic analog */
case 80019:
/* WinTV-HVR1400 (Express Card, Retail, IR,
* DVB-T and Basic analog */
case 81509:
/* WinTV-HVR1700 (PCIe, OEM, No IR, half height)
* DVB-T and MPEG2 HW Encoder */
case 81519:
/* WinTV-HVR1700 (PCIe, OEM, No IR, full height)
* DVB-T and MPEG2 HW Encoder */
break;
case 85021:
/* WinTV-HVR1850 (PCIe, Retail, 3.5mm in, IR, FM,
Dual channel ATSC and MPEG2 HW Encoder */
break;
case 85721:
/* WinTV-HVR1290 (PCIe, OEM, RCA in, IR,
Dual channel ATSC and Basic analog */
break;
default:
printk(KERN_WARNING "%s: warning: "
"unknown hauppauge model #%d\n",
dev->name, tv.model);
break;
}
printk(KERN_INFO "%s: hauppauge eeprom: model=%d\n",
dev->name, tv.model);
}
int cx23885_tuner_callback(void *priv, int component, int command, int arg)
{
struct cx23885_tsport *port = priv;
struct cx23885_dev *dev = port->dev;
u32 bitmask = 0;
if (command == XC2028_RESET_CLK)
return 0;
if (command != 0) {
printk(KERN_ERR "%s(): Unknown command 0x%x.\n",
__func__, command);
return -EINVAL;
}
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
/* Tuner Reset Command */
bitmask = 0x04;
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
/* Two identical tuners on two different i2c buses,
* we need to reset the correct gpio. */
if (port->nr == 1)
bitmask = 0x01;
else if (port->nr == 2)
bitmask = 0x04;
break;
case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
/* Tuner Reset Command */
bitmask = 0x02;
break;
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
altera_ci_tuner_reset(dev, port->nr);
break;
}
if (bitmask) {
/* Drive the tuner into reset and back out */
cx_clear(GP0_IO, bitmask);
mdelay(200);
cx_set(GP0_IO, bitmask);
}
return 0;
}
void cx23885_gpio_setup(struct cx23885_dev *dev)
{
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* GPIO-0 cx24227 demodulator reset */
cx_set(GP0_IO, 0x00010001); /* Bring the part out of reset */
break;
case CX23885_BOARD_HAUPPAUGE_HVR1500:
/* GPIO-0 cx24227 demodulator */
/* GPIO-2 xc3028 tuner */
/* Put the parts into reset */
cx_set(GP0_IO, 0x00050000);
cx_clear(GP0_IO, 0x00000005);
msleep(5);
/* Bring the parts out of reset */
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
/* GPIO-0 cx24227 demodulator reset */
/* GPIO-2 xc5000 tuner reset */
cx_set(GP0_IO, 0x00050005); /* Bring the part out of reset */
break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
/* GPIO-0 656_CLK */
/* GPIO-1 656_D0 */
/* GPIO-2 8295A Reset */
/* GPIO-3-10 cx23417 data0-7 */
/* GPIO-11-14 cx23417 addr0-3 */
/* GPIO-15-18 cx23417 READY, CS, RD, WR */
/* GPIO-19 IR_RX */
/* CX23417 GPIO's */
/* EIO15 Zilog Reset */
/* EIO14 S5H1409/CX24227 Reset */
mc417_gpio_enable(dev, GPIO_15 | GPIO_14, 1);
/* Put the demod into reset and protect the eeprom */
mc417_gpio_clear(dev, GPIO_15 | GPIO_14);
mdelay(100);
/* Bring the demod and blaster out of reset */
mc417_gpio_set(dev, GPIO_15 | GPIO_14);
mdelay(100);
/* Force the TDA8295A into reset and back */
cx23885_gpio_enable(dev, GPIO_2, 1);
cx23885_gpio_set(dev, GPIO_2);
mdelay(20);
cx23885_gpio_clear(dev, GPIO_2);
mdelay(20);
cx23885_gpio_set(dev, GPIO_2);
mdelay(20);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1200:
/* GPIO-0 tda10048 demodulator reset */
/* GPIO-2 tda18271 tuner reset */
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
mdelay(20);
cx_clear(GP0_IO, 0x00000005);
mdelay(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1700:
/* GPIO-0 TDA10048 demodulator reset */
/* GPIO-2 TDA8295A Reset */
/* GPIO-3-10 cx23417 data0-7 */
/* GPIO-11-14 cx23417 addr0-3 */
/* GPIO-15-18 cx23417 READY, CS, RD, WR */
/* The following GPIO's are on the internal AVCore (cx25840) */
/* GPIO-19 IR_RX */
/* GPIO-20 IR_TX 416/DVBT Select */
/* GPIO-21 IIS DAT */
/* GPIO-22 IIS WCLK */
/* GPIO-23 IIS BCLK */
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
mdelay(20);
cx_clear(GP0_IO, 0x00000005);
mdelay(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1400:
/* GPIO-0 Dibcom7000p demodulator reset */
/* GPIO-2 xc3028L tuner reset */
/* GPIO-13 LED */
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00050000);
mdelay(20);
cx_clear(GP0_IO, 0x00000005);
mdelay(20);
cx_set(GP0_IO, 0x00050005);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
/* GPIO-0 xc5000 tuner reset i2c bus 0 */
/* GPIO-1 s5h1409 demod reset i2c bus 0 */
/* GPIO-2 xc5000 tuner reset i2c bus 1 */
/* GPIO-3 s5h1409 demod reset i2c bus 0 */
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x000f0000);
mdelay(20);
cx_clear(GP0_IO, 0x0000000f);
mdelay(20);
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
/* GPIO-0 portb xc3028 reset */
/* GPIO-1 portb zl10353 reset */
/* GPIO-2 portc xc3028 reset */
/* GPIO-3 portc zl10353 reset */
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x000f0000);
mdelay(20);
cx_clear(GP0_IO, 0x0000000f);
mdelay(20);
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
/* GPIO-2 xc3028 tuner reset */
/* The following GPIO's are on the internal AVCore (cx25840) */
/* GPIO-? zl10353 demod reset */
/* Put the parts into reset and back */
cx_set(GP0_IO, 0x00040000);
mdelay(20);
cx_clear(GP0_IO, 0x00000004);
mdelay(20);
cx_set(GP0_IO, 0x00040004);
break;
case CX23885_BOARD_TBS_6920:
cx_write(MC417_CTL, 0x00000036);
cx_write(MC417_OEN, 0x00001000);
cx_set(MC417_RWD, 0x00000002);
mdelay(200);
cx_clear(MC417_RWD, 0x00000800);
mdelay(200);
cx_set(MC417_RWD, 0x00000800);
mdelay(200);
break;
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
/* GPIO-0 INTA from CiMax1
GPIO-1 INTB from CiMax2
GPIO-2 reset chips
GPIO-3 to GPIO-10 data/addr for CA
GPIO-11 ~CS0 to CiMax1
GPIO-12 ~CS1 to CiMax2
GPIO-13 ADL0 load LSB addr
GPIO-14 ADL1 load MSB addr
GPIO-15 ~RDY from CiMax
GPIO-17 ~RD to CiMax
GPIO-18 ~WR to CiMax
*/
cx_set(GP0_IO, 0x00040000); /* GPIO as out */
/* GPIO1 and GPIO2 as INTA and INTB from CiMaxes, reset low */
cx_clear(GP0_IO, 0x00030004);
mdelay(100);/* reset delay */
cx_set(GP0_IO, 0x00040004); /* GPIO as out, reset high */
cx_write(MC417_CTL, 0x00000037);/* enable GPIO3-18 pins */
/* GPIO-15 IN as ~ACK, rest as OUT */
cx_write(MC417_OEN, 0x00001000);
/* ~RD, ~WR high; ADL0, ADL1 low; ~CS0, ~CS1 high */
cx_write(MC417_RWD, 0x0000c300);
/* enable irq */
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
/* GPIO-6 I2C Gate which can isolate the demod from the bus */
/* GPIO-9 Demod reset */
/* Put the parts into reset and back */
cx23885_gpio_enable(dev, GPIO_9 | GPIO_6 | GPIO_5, 1);
cx23885_gpio_set(dev, GPIO_9 | GPIO_6 | GPIO_5);
cx23885_gpio_clear(dev, GPIO_9);
mdelay(20);
cx23885_gpio_set(dev, GPIO_9);
break;
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
case CX23885_BOARD_MYGICA_X8507:
/* GPIO-0 (0)Analog / (1)Digital TV */
/* GPIO-1 reset XC5000 */
/* GPIO-2 reset LGS8GL5 / LGS8G75 */
cx23885_gpio_enable(dev, GPIO_0 | GPIO_1 | GPIO_2, 1);
cx23885_gpio_clear(dev, GPIO_1 | GPIO_2);
mdelay(100);
cx23885_gpio_set(dev, GPIO_0 | GPIO_1 | GPIO_2);
mdelay(100);
break;
case CX23885_BOARD_MYGICA_X8558PRO:
/* GPIO-0 reset first ATBM8830 */
/* GPIO-1 reset second ATBM8830 */
cx23885_gpio_enable(dev, GPIO_0 | GPIO_1, 1);
cx23885_gpio_clear(dev, GPIO_0 | GPIO_1);
mdelay(100);
cx23885_gpio_set(dev, GPIO_0 | GPIO_1);
mdelay(100);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
/* GPIO-0 656_CLK */
/* GPIO-1 656_D0 */
/* GPIO-2 Wake# */
/* GPIO-3-10 cx23417 data0-7 */
/* GPIO-11-14 cx23417 addr0-3 */
/* GPIO-15-18 cx23417 READY, CS, RD, WR */
/* GPIO-19 IR_RX */
/* GPIO-20 C_IR_TX */
/* GPIO-21 I2S DAT */
/* GPIO-22 I2S WCLK */
/* GPIO-23 I2S BCLK */
/* ALT GPIO: EXP GPIO LATCH */
/* CX23417 GPIO's */
/* GPIO-14 S5H1411/CX24228 Reset */
/* GPIO-13 EEPROM write protect */
mc417_gpio_enable(dev, GPIO_14 | GPIO_13, 1);
/* Put the demod into reset and protect the eeprom */
mc417_gpio_clear(dev, GPIO_14 | GPIO_13);
mdelay(100);
/* Bring the demod out of reset */
mc417_gpio_set(dev, GPIO_14);
mdelay(100);
/* CX24228 GPIO */
/* Connected to IF / Mux */
break;
case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
cx_set(GP0_IO, 0x00010001); /* Bring the part out of reset */
break;
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
/* GPIO-0 ~INT in
GPIO-1 TMS out
GPIO-2 ~reset chips out
GPIO-3 to GPIO-10 data/addr for CA in/out
GPIO-11 ~CS out
GPIO-12 ADDR out
GPIO-13 ~WR out
GPIO-14 ~RD out
GPIO-15 ~RDY in
GPIO-16 TCK out
GPIO-17 TDO in
GPIO-18 TDI out
*/
cx_set(GP0_IO, 0x00060000); /* GPIO-1,2 as out */
/* GPIO-0 as INT, reset & TMS low */
cx_clear(GP0_IO, 0x00010006);
mdelay(100);/* reset delay */
cx_set(GP0_IO, 0x00000004); /* reset high */
cx_write(MC417_CTL, 0x00000037);/* enable GPIO-3..18 pins */
/* GPIO-17 is TDO in, GPIO-15 is ~RDY in, rest is out */
cx_write(MC417_OEN, 0x00005000);
/* ~RD, ~WR high; ADDR low; ~CS high */
cx_write(MC417_RWD, 0x00000d00);
/* enable irq */
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
}
}
int cx23885_ir_init(struct cx23885_dev *dev)
{
static struct v4l2_subdev_io_pin_config ir_rxtx_pin_cfg[] = {
{
.flags = V4L2_SUBDEV_IO_PIN_INPUT,
.pin = CX23885_PIN_IR_RX_GPIO19,
.function = CX23885_PAD_IR_RX,
.value = 0,
.strength = CX25840_PIN_DRIVE_MEDIUM,
}, {
.flags = V4L2_SUBDEV_IO_PIN_OUTPUT,
.pin = CX23885_PIN_IR_TX_GPIO20,
.function = CX23885_PAD_IR_TX,
.value = 0,
.strength = CX25840_PIN_DRIVE_MEDIUM,
}
};
const size_t ir_rxtx_pin_cfg_count = ARRAY_SIZE(ir_rxtx_pin_cfg);
static struct v4l2_subdev_io_pin_config ir_rx_pin_cfg[] = {
{
.flags = V4L2_SUBDEV_IO_PIN_INPUT,
.pin = CX23885_PIN_IR_RX_GPIO19,
.function = CX23885_PAD_IR_RX,
.value = 0,
.strength = CX25840_PIN_DRIVE_MEDIUM,
}
};
const size_t ir_rx_pin_cfg_count = ARRAY_SIZE(ir_rx_pin_cfg);
struct v4l2_subdev_ir_parameters params;
int ret = 0;
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1200:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* FIXME: Implement me */
break;
case CX23885_BOARD_HAUPPAUGE_HVR1270:
ret = cx23888_ir_probe(dev);
if (ret)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
ir_rx_pin_cfg_count, ir_rx_pin_cfg);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
ret = cx23888_ir_probe(dev);
if (ret)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
ir_rxtx_pin_cfg_count, ir_rxtx_pin_cfg);
/*
* For these boards we need to invert the Tx output via the
* IR controller to have the LED off while idle
*/
v4l2_subdev_call(dev->sd_ir, ir, tx_g_parameters, &params);
params.enable = false;
params.shutdown = false;
params.invert_level = true;
v4l2_subdev_call(dev->sd_ir, ir, tx_s_parameters, &params);
params.shutdown = true;
v4l2_subdev_call(dev->sd_ir, ir, tx_s_parameters, &params);
break;
case CX23885_BOARD_TEVII_S470:
if (!enable_885_ir)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
if (dev->sd_ir == NULL) {
ret = -ENODEV;
break;
}
v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
ir_rx_pin_cfg_count, ir_rx_pin_cfg);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
if (!enable_885_ir)
break;
dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_AV_CORE);
if (dev->sd_ir == NULL) {
ret = -ENODEV;
break;
}
v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
ir_rxtx_pin_cfg_count, ir_rxtx_pin_cfg);
break;
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
request_module("ir-kbd-i2c");
break;
}
return ret;
}
void cx23885_ir_fini(struct cx23885_dev *dev)
{
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
cx23885_irq_remove(dev, PCI_MSK_IR);
cx23888_ir_remove(dev);
dev->sd_ir = NULL;
break;
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
cx23885_irq_remove(dev, PCI_MSK_AV_CORE);
/* sd_ir is a duplicate pointer to the AV Core, just clear it */
dev->sd_ir = NULL;
break;
}
}
int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
{
int data;
int tdo = 0;
struct cx23885_dev *dev = (struct cx23885_dev *)device;
/*TMS*/
data = ((cx_read(GP0_IO)) & (~0x00000002));
data |= (tms ? 0x00020002 : 0x00020000);
cx_write(GP0_IO, data);
/*TDI*/
data = ((cx_read(MC417_RWD)) & (~0x0000a000));
data |= (tdi ? 0x00008000 : 0);
cx_write(MC417_RWD, data);
if (read_tdo)
tdo = (data & 0x00004000) ? 1 : 0; /*TDO*/
cx_write(MC417_RWD, data | 0x00002000);
udelay(1);
/*TCK*/
cx_write(MC417_RWD, data);
return tdo;
}
void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
{
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_IR);
break;
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
if (dev->sd_ir)
cx23885_irq_add_enable(dev, PCI_MSK_AV_CORE);
break;
}
}
void cx23885_card_setup(struct cx23885_dev *dev)
{
struct cx23885_tsport *ts1 = &dev->ts1;
struct cx23885_tsport *ts2 = &dev->ts2;
static u8 eeprom[256];
if (dev->i2c_bus[0].i2c_rc == 0) {
dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
tveeprom_read(&dev->i2c_bus[0].i2c_client,
eeprom, sizeof(eeprom));
}
switch (dev->board) {
case CX23885_BOARD_HAUPPAUGE_HVR1250:
if (dev->i2c_bus[0].i2c_rc == 0) {
if (eeprom[0x80] != 0x84)
hauppauge_eeprom(dev, eeprom+0xc0);
else
hauppauge_eeprom(dev, eeprom+0x80);
}
break;
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0x80);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1200:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
break;
}
switch (dev->board) {
case CX23885_BOARD_DVICO_FUSIONHDTV_7_DUAL_EXP:
case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
/* break omitted intentionally */
case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
/* Defaults for VID B - Analog encoder */
/* DREQ_POL, SMODE, PUNC_CLK, MCLK_POL Serial bus + punc clk */
ts1->gen_ctrl_val = 0x10e;
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
/* APB_TSVALERR_POL (active low)*/
ts1->vld_misc_val = 0x2000;
ts1->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4 | 0xc);
cx_write(0x130184, 0xc);
/* Defaults for VID C */
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_TBS_6920:
ts1->gen_ctrl_val = 0x4; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_DVBWORLD_2005:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_MYGICA_X8558PRO:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1200:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
default:
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
}
/* Certain boards support analog, or require the avcore to be
* loaded, ensure this happens.
*/
switch (dev->board) {
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Currently only enabled for the integrated IR controller */
if (!enable_885_ir)
break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
case CX23885_BOARD_GOTVIEW_X5_3D_HYBRID:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_MPX885:
case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
if (dev->sd_cx25840) {
dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
}
break;
}
/* AUX-PLL 27MHz CLK */
switch (dev->board) {
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
netup_initialize(dev);
break;
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF: {
int ret;
const struct firmware *fw;
const char *filename = "dvb-netup-altera-01.fw";
char *action = "configure";
static struct netup_card_info cinfo;
struct altera_config netup_config = {
.dev = dev,
.action = action,
.jtag_io = netup_jtag_io,
};
netup_initialize(dev);
netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
if (netup_card_rev)
cinfo.rev = netup_card_rev;
switch (cinfo.rev) {
case 0x4:
filename = "dvb-netup-altera-04.fw";
break;
default:
filename = "dvb-netup-altera-01.fw";
break;
}
printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
cinfo.rev, filename);
ret = request_firmware(&fw, filename, &dev->pci->dev);
if (ret != 0)
printk(KERN_ERR "did not find the firmware file. (%s) "
"Please see linux/Documentation/dvb/ for more details "
"on firmware-problems.", filename);
else
altera_init(&netup_config, fw);
release_firmware(fw);
break;
}
}
}
/* ------------------------------------------------------------------ */
| gpl-2.0 |
TheNotOnly/linux-3.5 | drivers/media/dvb/frontends/tda10071.c | 4876 | 26603 | /*
* NXP TDA10071 + Conexant CX24118A DVB-S/S2 demodulator + tuner driver
*
* Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "tda10071_priv.h"
int tda10071_debug;
module_param_named(debug, tda10071_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
static struct dvb_frontend_ops tda10071_ops;
/* write multiple registers */
static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
int len)
{
int ret;
u8 buf[len+1];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg.i2c_address,
.flags = 0,
.len = sizeof(buf),
.buf = buf,
}
};
buf[0] = reg;
memcpy(&buf[1], val, len);
ret = i2c_transfer(priv->i2c, msg, 1);
if (ret == 1) {
ret = 0;
} else {
warn("i2c wr failed=%d reg=%02x len=%d", ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
}
/* read multiple registers */
static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
int len)
{
int ret;
u8 buf[len];
struct i2c_msg msg[2] = {
{
.addr = priv->cfg.i2c_address,
.flags = 0,
.len = 1,
.buf = &reg,
}, {
.addr = priv->cfg.i2c_address,
.flags = I2C_M_RD,
.len = sizeof(buf),
.buf = buf,
}
};
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
memcpy(val, buf, len);
ret = 0;
} else {
warn("i2c rd failed=%d reg=%02x len=%d", ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
}
/* write single register */
static int tda10071_wr_reg(struct tda10071_priv *priv, u8 reg, u8 val)
{
return tda10071_wr_regs(priv, reg, &val, 1);
}
/* read single register */
static int tda10071_rd_reg(struct tda10071_priv *priv, u8 reg, u8 *val)
{
return tda10071_rd_regs(priv, reg, val, 1);
}
/* write single register with mask */
int tda10071_wr_reg_mask(struct tda10071_priv *priv, u8 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
/* no need for read if whole reg is written */
if (mask != 0xff) {
ret = tda10071_rd_regs(priv, reg, &tmp, 1);
if (ret)
return ret;
val &= mask;
tmp &= ~mask;
val |= tmp;
}
return tda10071_wr_regs(priv, reg, &val, 1);
}
/* read single register with mask */
int tda10071_rd_reg_mask(struct tda10071_priv *priv, u8 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
ret = tda10071_rd_regs(priv, reg, &tmp, 1);
if (ret)
return ret;
tmp &= mask;
/* find position of the first bit */
for (i = 0; i < 8; i++) {
if ((mask >> i) & 0x01)
break;
}
*val = tmp >> i;
return 0;
}
/* execute firmware command */
static int tda10071_cmd_execute(struct tda10071_priv *priv,
struct tda10071_cmd *cmd)
{
int ret, i;
u8 tmp;
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
/* write cmd and args for firmware */
ret = tda10071_wr_regs(priv, 0x00, cmd->args, cmd->len);
if (ret)
goto error;
/* start cmd execution */
ret = tda10071_wr_reg(priv, 0x1f, 1);
if (ret)
goto error;
/* wait cmd execution terminate */
for (i = 1000, tmp = 1; i && tmp; i--) {
ret = tda10071_rd_reg(priv, 0x1f, &tmp);
if (ret)
goto error;
usleep_range(200, 5000);
}
dbg("%s: loop=%d", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
goto error;
}
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_set_tone(struct dvb_frontend *fe,
fe_sec_tone_mode_t fe_sec_tone_mode)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret;
u8 tone;
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
dbg("%s: tone_mode=%d", __func__, fe_sec_tone_mode);
switch (fe_sec_tone_mode) {
case SEC_TONE_ON:
tone = 1;
break;
case SEC_TONE_OFF:
tone = 0;
break;
default:
dbg("%s: invalid fe_sec_tone_mode", __func__);
ret = -EINVAL;
goto error;
}
cmd.args[0x00] = CMD_LNB_PCB_CONFIG;
cmd.args[0x01] = 0;
cmd.args[0x02] = 0x00;
cmd.args[0x03] = 0x00;
cmd.args[0x04] = tone;
cmd.len = 0x05;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t fe_sec_voltage)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret;
u8 voltage;
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
dbg("%s: voltage=%d", __func__, fe_sec_voltage);
switch (fe_sec_voltage) {
case SEC_VOLTAGE_13:
voltage = 0;
break;
case SEC_VOLTAGE_18:
voltage = 1;
break;
case SEC_VOLTAGE_OFF:
voltage = 0;
break;
default:
dbg("%s: invalid fe_sec_voltage", __func__);
ret = -EINVAL;
goto error;
}
cmd.args[0x00] = CMD_LNB_SET_DC_LEVEL;
cmd.args[0x01] = 0;
cmd.args[0x02] = voltage;
cmd.len = 0x03;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *diseqc_cmd)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret, i;
u8 tmp;
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
dbg("%s: msg_len=%d", __func__, diseqc_cmd->msg_len);
if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 16) {
ret = -EINVAL;
goto error;
}
/* wait LNB TX */
for (i = 500, tmp = 0; i && !tmp; i--) {
ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
if (ret)
goto error;
usleep_range(10000, 20000);
}
dbg("%s: loop=%d", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
goto error;
}
ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
if (ret)
goto error;
cmd.args[0x00] = CMD_LNB_SEND_DISEQC;
cmd.args[0x01] = 0;
cmd.args[0x02] = 0;
cmd.args[0x03] = 0;
cmd.args[0x04] = 2;
cmd.args[0x05] = 0;
cmd.args[0x06] = diseqc_cmd->msg_len;
memcpy(&cmd.args[0x07], diseqc_cmd->msg, diseqc_cmd->msg_len);
cmd.len = 0x07 + diseqc_cmd->msg_len;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
struct dvb_diseqc_slave_reply *reply)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret, i;
u8 tmp;
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
dbg("%s:", __func__);
/* wait LNB RX */
for (i = 500, tmp = 0; i && !tmp; i--) {
ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x02);
if (ret)
goto error;
usleep_range(10000, 20000);
}
dbg("%s: loop=%d", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
goto error;
}
/* reply len */
ret = tda10071_rd_reg(priv, 0x46, &tmp);
if (ret)
goto error;
reply->msg_len = tmp & 0x1f; /* [4:0] */
if (reply->msg_len > sizeof(reply->msg))
reply->msg_len = sizeof(reply->msg); /* truncate API max */
/* read reply */
cmd.args[0x00] = CMD_LNB_UPDATE_REPLY;
cmd.args[0x01] = 0;
cmd.len = 0x02;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
ret = tda10071_rd_regs(priv, cmd.len, reply->msg, reply->msg_len);
if (ret)
goto error;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
fe_sec_mini_cmd_t fe_sec_mini_cmd)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret, i;
u8 tmp, burst;
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
dbg("%s: fe_sec_mini_cmd=%d", __func__, fe_sec_mini_cmd);
switch (fe_sec_mini_cmd) {
case SEC_MINI_A:
burst = 0;
break;
case SEC_MINI_B:
burst = 1;
break;
default:
dbg("%s: invalid fe_sec_mini_cmd", __func__);
ret = -EINVAL;
goto error;
}
/* wait LNB TX */
for (i = 500, tmp = 0; i && !tmp; i--) {
ret = tda10071_rd_reg_mask(priv, 0x47, &tmp, 0x01);
if (ret)
goto error;
usleep_range(10000, 20000);
}
dbg("%s: loop=%d", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
goto error;
}
ret = tda10071_wr_reg_mask(priv, 0x47, 0x00, 0x01);
if (ret)
goto error;
cmd.args[0x00] = CMD_LNB_SEND_TONEBURST;
cmd.args[0x01] = 0;
cmd.args[0x02] = burst;
cmd.len = 0x03;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct tda10071_priv *priv = fe->demodulator_priv;
int ret;
u8 tmp;
*status = 0;
if (!priv->warm) {
ret = 0;
goto error;
}
ret = tda10071_rd_reg(priv, 0x39, &tmp);
if (ret)
goto error;
if (tmp & 0x01) /* tuner PLL */
*status |= FE_HAS_SIGNAL;
if (tmp & 0x02) /* demod PLL */
*status |= FE_HAS_CARRIER;
if (tmp & 0x04) /* viterbi or LDPC*/
*status |= FE_HAS_VITERBI;
if (tmp & 0x08) /* RS or BCH */
*status |= FE_HAS_SYNC | FE_HAS_LOCK;
priv->fe_status = *status;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct tda10071_priv *priv = fe->demodulator_priv;
int ret;
u8 buf[2];
if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
*snr = 0;
ret = 0;
goto error;
}
ret = tda10071_rd_regs(priv, 0x3a, buf, 2);
if (ret)
goto error;
/* Es/No dBx10 */
*snr = buf[0] << 8 | buf[1];
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret;
u8 tmp;
if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
*strength = 0;
ret = 0;
goto error;
}
cmd.args[0x00] = CMD_GET_AGCACC;
cmd.args[0x01] = 0;
cmd.len = 0x02;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
/* input power estimate dBm */
ret = tda10071_rd_reg(priv, 0x50, &tmp);
if (ret)
goto error;
if (tmp < 181)
tmp = 181; /* -75 dBm */
else if (tmp > 236)
tmp = 236; /* -20 dBm */
/* scale value to 0x0000-0xffff */
*strength = (tmp-181) * 0xffff / (236-181);
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret, i, len;
u8 tmp, reg, buf[8];
if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
*ber = priv->ber = 0;
ret = 0;
goto error;
}
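/* DVB-S and DVB-S2 use different measurement counters and result widths */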
switch (priv->delivery_system) {
case SYS_DVBS:
reg = 0x4c;
len = 8;
i = 1;
break;
case SYS_DVBS2:
reg = 0x4d;
len = 4;
i = 0;
break;
default:
*ber = priv->ber = 0;
return 0;
}
ret = tda10071_rd_reg(priv, reg, &tmp);
if (ret)
goto error;
if (priv->meas_count[i] == tmp) {
dbg("%s: meas not ready=%02x", __func__, tmp);
*ber = priv->ber;
return 0;
} else {
priv->meas_count[i] = tmp;
}
cmd.args[0x00] = CMD_BER_UPDATE_COUNTERS;
cmd.args[0x01] = 0;
cmd.args[0x02] = i;
cmd.len = 0x03;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
ret = tda10071_rd_regs(priv, cmd.len, buf, len);
if (ret)
goto error;
if (priv->delivery_system == SYS_DVBS) {
*ber = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
priv->ucb += (buf[4] << 8) | buf[5];
} else {
*ber = (buf[0] << 8) | buf[1];
}
priv->ber = *ber;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct tda10071_priv *priv = fe->demodulator_priv;
int ret = 0;
if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
*ucblocks = 0;
goto error;
}
/* UCB is updated when BER is read. Assume BER is read anyway. */
*ucblocks = priv->ucb;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_set_frontend(struct dvb_frontend *fe)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 mode, rolloff, pilot, inversion, div;
dbg("%s: delivery_system=%d modulation=%d frequency=%d " \
"symbol_rate=%d inversion=%d pilot=%d rolloff=%d", __func__,
c->delivery_system, c->modulation, c->frequency,
c->symbol_rate, c->inversion, c->pilot, c->rolloff);
priv->delivery_system = SYS_UNDEFINED;
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
switch (c->inversion) {
case INVERSION_OFF:
inversion = 1;
break;
case INVERSION_ON:
inversion = 0;
break;
case INVERSION_AUTO:
/* 2 = auto; try first on then off
* 3 = auto; try first off then on */
inversion = 3;
break;
default:
dbg("%s: invalid inversion", __func__);
ret = -EINVAL;
goto error;
}
switch (c->delivery_system) {
case SYS_DVBS:
rolloff = 0;
pilot = 2;
break;
case SYS_DVBS2:
switch (c->rolloff) {
case ROLLOFF_20:
rolloff = 2;
break;
case ROLLOFF_25:
rolloff = 1;
break;
case ROLLOFF_35:
rolloff = 0;
break;
case ROLLOFF_AUTO:
default:
dbg("%s: invalid rolloff", __func__);
ret = -EINVAL;
goto error;
}
switch (c->pilot) {
case PILOT_OFF:
pilot = 0;
break;
case PILOT_ON:
pilot = 1;
break;
case PILOT_AUTO:
pilot = 2;
break;
default:
dbg("%s: invalid pilot", __func__);
ret = -EINVAL;
goto error;
}
break;
default:
dbg("%s: invalid delivery_system", __func__);
ret = -EINVAL;
goto error;
}
for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
c->modulation == TDA10071_MODCOD[i].modulation &&
c->fec_inner == TDA10071_MODCOD[i].fec) {
mode = TDA10071_MODCOD[i].val;
dbg("%s: mode found=%02x", __func__, mode);
break;
}
}
if (mode == 0xff) {
dbg("%s: invalid parameter combination", __func__);
ret = -EINVAL;
goto error;
}
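/* use a larger divider (regs 0x81/0xe3) for low symbol rates */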
if (c->symbol_rate <= 5000000)
div = 14;
else
div = 4;
ret = tda10071_wr_reg(priv, 0x81, div);
if (ret)
goto error;
ret = tda10071_wr_reg(priv, 0xe3, div);
if (ret)
goto error;
cmd.args[0x00] = CMD_CHANGE_CHANNEL;
cmd.args[0x01] = 0;
cmd.args[0x02] = mode;
cmd.args[0x03] = (c->frequency >> 16) & 0xff;
cmd.args[0x04] = (c->frequency >> 8) & 0xff;
cmd.args[0x05] = (c->frequency >> 0) & 0xff;
cmd.args[0x06] = ((c->symbol_rate / 1000) >> 8) & 0xff;
cmd.args[0x07] = ((c->symbol_rate / 1000) >> 0) & 0xff;
cmd.args[0x08] = (tda10071_ops.info.frequency_tolerance >> 8) & 0xff;
cmd.args[0x09] = (tda10071_ops.info.frequency_tolerance >> 0) & 0xff;
cmd.args[0x0a] = rolloff;
cmd.args[0x0b] = inversion;
cmd.args[0x0c] = pilot;
cmd.args[0x0d] = 0x00;
cmd.args[0x0e] = 0x00;
cmd.len = 0x0f;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
priv->delivery_system = c->delivery_system;
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_get_frontend(struct dvb_frontend *fe)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 buf[5], tmp;
if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
ret = -EFAULT;
goto error;
}
ret = tda10071_rd_regs(priv, 0x30, buf, 5);
if (ret)
goto error;
tmp = buf[0] & 0x3f;
for (i = 0; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
if (tmp == TDA10071_MODCOD[i].val) {
c->modulation = TDA10071_MODCOD[i].modulation;
c->fec_inner = TDA10071_MODCOD[i].fec;
c->delivery_system = TDA10071_MODCOD[i].delivery_system;
}
}
switch ((buf[1] >> 0) & 0x01) {
case 0:
c->inversion = INVERSION_OFF;
break;
case 1:
c->inversion = INVERSION_ON;
break;
}
switch ((buf[1] >> 7) & 0x01) {
case 0:
c->pilot = PILOT_OFF;
break;
case 1:
c->pilot = PILOT_ON;
break;
}
c->frequency = (buf[2] << 16) | (buf[3] << 8) | (buf[4] << 0);
ret = tda10071_rd_regs(priv, 0x52, buf, 3);
if (ret)
goto error;
c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_init(struct dvb_frontend *fe)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret, i, len, remaining, fw_size;
const struct firmware *fw;
u8 *fw_file = TDA10071_DEFAULT_FIRMWARE;
u8 tmp, buf[4];
struct tda10071_reg_val_mask tab[] = {
{ 0xcd, 0x00, 0x07 },
{ 0x80, 0x00, 0x02 },
{ 0xcd, 0x00, 0xc0 },
{ 0xce, 0x00, 0x1b },
{ 0x9d, 0x00, 0x01 },
{ 0x9d, 0x00, 0x02 },
{ 0x9e, 0x00, 0x01 },
{ 0x87, 0x00, 0x80 },
{ 0xce, 0x00, 0x08 },
{ 0xce, 0x00, 0x10 },
};
struct tda10071_reg_val_mask tab2[] = {
{ 0xf1, 0x70, 0xff },
{ 0x88, priv->cfg.pll_multiplier, 0x3f },
{ 0x89, 0x00, 0x10 },
{ 0x89, 0x10, 0x10 },
{ 0xc0, 0x01, 0x01 },
{ 0xc0, 0x00, 0x01 },
{ 0xe0, 0xff, 0xff },
{ 0xe0, 0x00, 0xff },
{ 0x96, 0x1e, 0x7e },
{ 0x8b, 0x08, 0x08 },
{ 0x8b, 0x00, 0x08 },
{ 0x8f, 0x1a, 0x7e },
{ 0x8c, 0x68, 0xff },
{ 0x8d, 0x08, 0xff },
{ 0x8e, 0x4c, 0xff },
{ 0x8f, 0x01, 0x01 },
{ 0x8b, 0x04, 0x04 },
{ 0x8b, 0x00, 0x04 },
{ 0x87, 0x05, 0x07 },
{ 0x80, 0x00, 0x20 },
{ 0xc8, 0x01, 0xff },
{ 0xb4, 0x47, 0xff },
{ 0xb5, 0x9c, 0xff },
{ 0xb6, 0x7d, 0xff },
{ 0xba, 0x00, 0x03 },
{ 0xb7, 0x47, 0xff },
{ 0xb8, 0x9c, 0xff },
{ 0xb9, 0x7d, 0xff },
{ 0xba, 0x00, 0x0c },
{ 0xc8, 0x00, 0xff },
{ 0xcd, 0x00, 0x04 },
{ 0xcd, 0x00, 0x20 },
{ 0xe8, 0x02, 0xff },
{ 0xcf, 0x20, 0xff },
{ 0x9b, 0xd7, 0xff },
{ 0x9a, 0x01, 0x03 },
{ 0xa8, 0x05, 0x0f },
{ 0xa8, 0x65, 0xf0 },
{ 0xa6, 0xa0, 0xf0 },
{ 0x9d, 0x50, 0xfc },
{ 0x9e, 0x20, 0xe0 },
{ 0xa3, 0x1c, 0x7c },
{ 0xd5, 0x03, 0x03 },
};
/* firmware status */
ret = tda10071_rd_reg(priv, 0x51, &tmp);
if (ret)
goto error;
if (!tmp) {
/* warm state - wake up device from sleep */
priv->warm = 1;
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = tda10071_wr_reg_mask(priv, tab[i].reg,
tab[i].val, tab[i].mask);
if (ret)
goto error;
}
cmd.args[0x00] = CMD_SET_SLEEP_MODE;
cmd.args[0x01] = 0;
cmd.args[0x02] = 0;
cmd.len = 0x03;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
} else {
/* cold state - try to download firmware */
priv->warm = 0;
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
if (ret) {
err("did not find the firmware file. (%s) "
"Please see linux/Documentation/dvb/ for more" \
" details on firmware-problems. (%d)",
fw_file, ret);
goto error;
}
/* init */
for (i = 0; i < ARRAY_SIZE(tab2); i++) {
ret = tda10071_wr_reg_mask(priv, tab2[i].reg,
tab2[i].val, tab2[i].mask);
if (ret)
goto error_release_firmware;
}
/* download firmware */
ret = tda10071_wr_reg(priv, 0xe0, 0x7f);
if (ret)
goto error_release_firmware;
ret = tda10071_wr_reg(priv, 0xf7, 0x81);
if (ret)
goto error_release_firmware;
ret = tda10071_wr_reg(priv, 0xf8, 0x00);
if (ret)
goto error_release_firmware;
ret = tda10071_wr_reg(priv, 0xf9, 0x00);
if (ret)
goto error_release_firmware;
info("found a '%s' in cold state, will try to load a firmware",
tda10071_ops.info.name);
info("downloading firmware from file '%s'", fw_file);
/* do not download last byte */
fw_size = fw->size - 1;
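/* write the image in chunks of at most i2c_wr_max - 1 bytes; one byte
* of each I2C transfer is taken by the register address */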
for (remaining = fw_size; remaining > 0;
remaining -= (priv->cfg.i2c_wr_max - 1)) {
len = remaining;
if (len > (priv->cfg.i2c_wr_max - 1))
len = (priv->cfg.i2c_wr_max - 1);
ret = tda10071_wr_regs(priv, 0xfa,
(u8 *) &fw->data[fw_size - remaining], len);
if (ret) {
err("firmware download failed=%d", ret);
goto error_release_firmware;
}
}
release_firmware(fw);
ret = tda10071_wr_reg(priv, 0xf7, 0x0c);
if (ret)
goto error;
ret = tda10071_wr_reg(priv, 0xe0, 0x00);
if (ret)
goto error;
/* wait firmware start */
msleep(250);
/* firmware status */
ret = tda10071_rd_reg(priv, 0x51, &tmp);
if (ret)
goto error;
if (tmp) {
info("firmware did not run");
ret = -EFAULT;
goto error;
} else {
priv->warm = 1;
}
cmd.args[0x00] = CMD_GET_FW_VERSION;
cmd.len = 0x01;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
ret = tda10071_rd_regs(priv, cmd.len, buf, 4);
if (ret)
goto error;
info("firmware version %d.%d.%d.%d",
buf[0], buf[1], buf[2], buf[3]);
info("found a '%s' in warm state.", tda10071_ops.info.name);
ret = tda10071_rd_regs(priv, 0x81, buf, 2);
if (ret)
goto error;
cmd.args[0x00] = CMD_DEMOD_INIT;
cmd.args[0x01] = ((priv->cfg.xtal / 1000) >> 8) & 0xff;
cmd.args[0x02] = ((priv->cfg.xtal / 1000) >> 0) & 0xff;
cmd.args[0x03] = buf[0];
cmd.args[0x04] = buf[1];
cmd.args[0x05] = priv->cfg.pll_multiplier;
cmd.args[0x06] = priv->cfg.spec_inv;
cmd.args[0x07] = 0x00;
cmd.len = 0x08;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
cmd.args[0x00] = CMD_TUNER_INIT;
cmd.args[0x01] = 0x00;
cmd.args[0x02] = 0x00;
cmd.args[0x03] = 0x00;
cmd.args[0x04] = 0x00;
cmd.args[0x05] = 0x14;
cmd.args[0x06] = 0x00;
cmd.args[0x07] = 0x03;
cmd.args[0x08] = 0x02;
cmd.args[0x09] = 0x02;
cmd.args[0x0a] = 0x00;
cmd.args[0x0b] = 0x00;
cmd.args[0x0c] = 0x00;
cmd.args[0x0d] = 0x00;
cmd.args[0x0e] = 0x00;
cmd.len = 0x0f;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
cmd.args[0x00] = CMD_MPEG_CONFIG;
cmd.args[0x01] = 0;
cmd.args[0x02] = priv->cfg.ts_mode;
cmd.args[0x03] = 0x00;
cmd.args[0x04] = 0x04;
cmd.args[0x05] = 0x00;
cmd.len = 0x06;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
ret = tda10071_wr_reg_mask(priv, 0xf0, 0x01, 0x01);
if (ret)
goto error;
cmd.args[0x00] = CMD_LNB_CONFIG;
cmd.args[0x01] = 0;
cmd.args[0x02] = 150;
cmd.args[0x03] = 3;
cmd.args[0x04] = 22;
cmd.args[0x05] = 1;
cmd.args[0x06] = 1;
cmd.args[0x07] = 30;
cmd.args[0x08] = 30;
cmd.args[0x09] = 30;
cmd.args[0x0a] = 30;
cmd.len = 0x0b;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
cmd.args[0x00] = CMD_BER_CONTROL;
cmd.args[0x01] = 0;
cmd.args[0x02] = 14;
cmd.args[0x03] = 14;
cmd.len = 0x04;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
}
return ret;
error_release_firmware:
release_firmware(fw);
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_sleep(struct dvb_frontend *fe)
{
struct tda10071_priv *priv = fe->demodulator_priv;
struct tda10071_cmd cmd;
int ret, i;
struct tda10071_reg_val_mask tab[] = {
{ 0xcd, 0x07, 0x07 },
{ 0x80, 0x02, 0x02 },
{ 0xcd, 0xc0, 0xc0 },
{ 0xce, 0x1b, 0x1b },
{ 0x9d, 0x01, 0x01 },
{ 0x9d, 0x02, 0x02 },
{ 0x9e, 0x01, 0x01 },
{ 0x87, 0x80, 0x80 },
{ 0xce, 0x08, 0x08 },
{ 0xce, 0x10, 0x10 },
};
if (!priv->warm) {
ret = -EFAULT;
goto error;
}
cmd.args[0x00] = CMD_SET_SLEEP_MODE;
cmd.args[0x01] = 0;
cmd.args[0x02] = 1;
cmd.len = 0x03;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = tda10071_wr_reg_mask(priv, tab[i].reg, tab[i].val,
tab[i].mask);
if (ret)
goto error;
}
return ret;
error:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int tda10071_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
s->min_delay_ms = 8000;
s->step_size = 0;
s->max_drift = 0;
return 0;
}
static void tda10071_release(struct dvb_frontend *fe)
{
struct tda10071_priv *priv = fe->demodulator_priv;
kfree(priv);
}
struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
struct i2c_adapter *i2c)
{
int ret;
struct tda10071_priv *priv = NULL;
u8 tmp;
/* allocate memory for the internal priv */
priv = kzalloc(sizeof(struct tda10071_priv), GFP_KERNEL);
if (priv == NULL) {
ret = -ENOMEM;
goto error;
}
/* setup the priv */
priv->i2c = i2c;
memcpy(&priv->cfg, config, sizeof(struct tda10071_config));
/* chip ID */
ret = tda10071_rd_reg(priv, 0xff, &tmp);
if (ret || tmp != 0x0f)
goto error;
/* chip type */
ret = tda10071_rd_reg(priv, 0xdd, &tmp);
if (ret || tmp != 0x00)
goto error;
/* chip version */
ret = tda10071_rd_reg(priv, 0xfe, &tmp);
if (ret || tmp != 0x01)
goto error;
/* create dvb_frontend */
memcpy(&priv->fe.ops, &tda10071_ops, sizeof(struct dvb_frontend_ops));
priv->fe.demodulator_priv = priv;
return &priv->fe;
error:
dbg("%s: failed=%d", __func__, ret);
kfree(priv);
return NULL;
}
EXPORT_SYMBOL(tda10071_attach);
static struct dvb_frontend_ops tda10071_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
.info = {
.name = "NXP TDA10071",
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_tolerance = 5000,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
FE_CAN_FEC_4_5 |
FE_CAN_FEC_5_6 |
FE_CAN_FEC_6_7 |
FE_CAN_FEC_7_8 |
FE_CAN_FEC_8_9 |
FE_CAN_FEC_AUTO |
FE_CAN_QPSK |
FE_CAN_RECOVER |
FE_CAN_2G_MODULATION
},
.release = tda10071_release,
.get_tune_settings = tda10071_get_tune_settings,
.init = tda10071_init,
.sleep = tda10071_sleep,
.set_frontend = tda10071_set_frontend,
.get_frontend = tda10071_get_frontend,
.read_status = tda10071_read_status,
.read_snr = tda10071_read_snr,
.read_signal_strength = tda10071_read_signal_strength,
.read_ber = tda10071_read_ber,
.read_ucblocks = tda10071_read_ucblocks,
.diseqc_send_master_cmd = tda10071_diseqc_send_master_cmd,
.diseqc_recv_slave_reply = tda10071_diseqc_recv_slave_reply,
.diseqc_send_burst = tda10071_diseqc_send_burst,
.set_tone = tda10071_set_tone,
.set_voltage = tda10071_set_voltage,
};
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("NXP TDA10071 DVB-S/S2 demodulator driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
klabit87/jflte_vzw_of1 | arch/arm/mach-at91/leds.c | 5132 | 3691 | /*
* LED driver for Atmel AT91-based boards.
*
* Copyright (C) SAN People (Pty) Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/board.h>
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_NEW_LEDS)
/*
* New cross-platform LED support.
*/
static struct gpio_led_platform_data led_data;
static struct platform_device at91_gpio_leds_device = {
.name = "leds-gpio",
.id = -1,
.dev.platform_data = &led_data,
};
void __init at91_gpio_leds(struct gpio_led *leds, int nr)
{
int i;
if (!nr)
return;
for (i = 0; i < nr; i++)
at91_set_gpio_output(leds[i].gpio, leds[i].active_low);
led_data.leds = leds;
led_data.num_leds = nr;
platform_device_register(&at91_gpio_leds_device);
}
#else
void __init at91_gpio_leds(struct gpio_led *leds, int nr) {}
#endif
/* ------------------------------------------------------------------------- */
#if defined (CONFIG_LEDS_ATMEL_PWM)
/*
* PWM Leds
*/
static struct gpio_led_platform_data pwm_led_data;
static struct platform_device at91_pwm_leds_device = {
.name = "leds-atmel-pwm",
.id = -1,
.dev.platform_data = &pwm_led_data,
};
void __init at91_pwm_leds(struct gpio_led *leds, int nr)
{
int i;
u32 pwm_mask = 0;
if (!nr)
return;
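/* here the gpio field selects a PWM channel rather than a GPIO line;
* build the mask of channels to enable */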
for (i = 0; i < nr; i++)
pwm_mask |= (1 << leds[i].gpio);
pwm_led_data.leds = leds;
pwm_led_data.num_leds = nr;
at91_add_device_pwm(pwm_mask);
platform_device_register(&at91_pwm_leds_device);
}
#else
void __init at91_pwm_leds(struct gpio_led *leds, int nr){}
#endif
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_LEDS)
#include <asm/leds.h>
/*
* Old ARM-specific LED framework; not fully functional when generic time is
* in use.
*/
static u8 at91_leds_cpu;
static u8 at91_leds_timer;
static inline void at91_led_on(unsigned int led)
{
at91_set_gpio_value(led, 0);
}
static inline void at91_led_off(unsigned int led)
{
at91_set_gpio_value(led, 1);
}
static inline void at91_led_toggle(unsigned int led)
{
unsigned long is_off = at91_get_gpio_value(led);
if (is_off)
at91_led_on(led);
else
at91_led_off(led);
}
/*
* Handle LED events.
*/
static void at91_leds_event(led_event_t evt)
{
unsigned long flags;
local_irq_save(flags);
switch(evt) {
case led_start: /* System startup */
at91_led_on(at91_leds_cpu);
break;
case led_stop: /* System stop / suspend */
at91_led_off(at91_leds_cpu);
break;
#ifdef CONFIG_LEDS_TIMER
case led_timer: /* Every 50 timer ticks */
at91_led_toggle(at91_leds_timer);
break;
#endif
#ifdef CONFIG_LEDS_CPU
case led_idle_start: /* Entering idle state */
at91_led_off(at91_leds_cpu);
break;
case led_idle_end: /* Exit idle state */
at91_led_on(at91_leds_cpu);
break;
#endif
default:
break;
}
local_irq_restore(flags);
}
static int __init leds_init(void)
{
if (!at91_leds_timer || !at91_leds_cpu)
return -ENODEV;
leds_event = at91_leds_event;
leds_event(led_start);
return 0;
}
__initcall(leds_init);
void __init at91_init_leds(u8 cpu_led, u8 timer_led)
{
/* Enable GPIO to access the LEDs */
at91_set_gpio_output(cpu_led, 1);
at91_set_gpio_output(timer_led, 1);
at91_leds_cpu = cpu_led;
at91_leds_timer = timer_led;
}
#else
void __init at91_init_leds(u8 cpu_led, u8 timer_led) {}
#endif
| gpl-2.0 |
somcom3x/android_kernel_motorola_msm8226 | arch/sparc/kernel/chmc.c | 7436 | 20646 | /* chmc.c: Driver for UltraSPARC-III memory controller.
*
* Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/spitfire.h>
#include <asm/chmctrl.h>
#include <asm/cpudata.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/head.h>
#include <asm/io.h>
#include <asm/memctrl.h>
#define DRV_MODULE_NAME "chmc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.2"
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("UltraSPARC-III memory controller driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int mc_type;
#define MC_TYPE_SAFARI 1
#define MC_TYPE_JBUS 2
static dimm_printer_t us3mc_dimm_printer;
#define CHMCTRL_NDGRPS 2
#define CHMCTRL_NDIMMS 4
#define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)
/* OBP memory-layout property format. */
struct chmc_obp_map {
unsigned char dimm_map[144];
unsigned char pin_map[576];
};
#define DIMM_LABEL_SZ 8
struct chmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct chmc_obp_map map[2];
};
#define CHMCTRL_NBANKS 4
struct chmc_bank_info {
struct chmc *p;
int bank_id;
u64 raw_reg;
int valid;
int uk;
int um;
int lk;
int lm;
int interleave;
unsigned long base;
unsigned long size;
};
struct chmc {
struct list_head list;
int portid;
struct chmc_obp_mem_layout layout_prop;
int layout_size;
void __iomem *regs;
u64 timing_control1;
u64 timing_control2;
u64 timing_control3;
u64 timing_control4;
u64 memaddr_control;
struct chmc_bank_info logical_banks[CHMCTRL_NBANKS];
};
#define JBUSMC_REGS_SIZE 8
#define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL
#define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL
#define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL
#define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL
#define JB_MC_REG1_XOR 0x0000010000000000UL
#define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL
#define JB_MC_REG1_ADDR_GEN_2_SHIFT 37
#define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL
#define JB_MC_REG1_ADDR_GEN_1_SHIFT 34
#define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL
#define JB_MC_REG1_INTERLEAVE_SHIFT 23
#define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL
#define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21
#define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL
#define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20
#define PART_TYPE_X8 0
#define PART_TYPE_X4 1
#define INTERLEAVE_NONE 0
#define INTERLEAVE_SAME 1
#define INTERLEAVE_INTERNAL 2
#define INTERLEAVE_BOTH 3
#define ADDR_GEN_128MB 0
#define ADDR_GEN_256MB 1
#define ADDR_GEN_512MB 2
#define ADDR_GEN_1GB 3
#define JB_NUM_DIMM_GROUPS 2
#define JB_NUM_DIMMS_PER_GROUP 2
#define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP)
struct jbusmc_obp_map {
unsigned char dimm_map[18];
unsigned char pin_map[144];
};
struct jbusmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct jbusmc_obp_map map;
char _pad;
};
struct jbusmc_dimm_group {
struct jbusmc *controller;
int index;
u64 base_addr;
u64 size;
};
struct jbusmc {
void __iomem *regs;
u64 mc_reg_1;
u32 portid;
struct jbusmc_obp_mem_layout layout;
int layout_len;
int num_dimm_groups;
struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS];
struct list_head list;
};
static DEFINE_SPINLOCK(mctrl_list_lock);
static LIST_HEAD(mctrl_list);
static void mc_list_add(struct list_head *list)
{
spin_lock(&mctrl_list_lock);
list_add(list, &mctrl_list);
spin_unlock(&mctrl_list_lock);
}
static void mc_list_del(struct list_head *list)
{
spin_lock(&mctrl_list_lock);
list_del_init(list);
spin_unlock(&mctrl_list_lock);
}
#define SYNDROME_MIN -1
#define SYNDROME_MAX 144
/* Convert syndrome code into the way the bits are positioned
* on the bus.
*/
static int syndrome_to_qword_code(int syndrome_code)
{
if (syndrome_code < 128)
syndrome_code += 16;
else if (syndrome_code < 128 + 9)
syndrome_code -= (128 - 7);
else if (syndrome_code < (128 + 9 + 3))
syndrome_code -= (128 + 9 - 4);
else
syndrome_code -= (128 + 9 + 3);
return syndrome_code;
}
/* All this magic has to do with how a cache line comes over the wire
* on Safari and JBUS. A 64-bit line comes over in 1 or more quadword
* cycles, each of which transmit ECC/MTAG info as well as the actual
* data.
*/
#define L2_LINE_SIZE 64
#define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1)
#define QW_PER_LINE 4
#define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE)
#define QW_BITS 144
#define SAFARI_LAST_BIT (576 - 1)
#define JBUS_LAST_BIT (144 - 1)
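/* Translate a syndrome code plus physical address into a DIMM label and
* pin number, using the OBP memory-layout map (JBUS or Safari variant).
*/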
static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr,
int *pin_p, char **dimm_str_p, void *_prop,
int base_dimm_offset)
{
int qword_code = syndrome_to_qword_code(syndrome_code);
int cache_line_offset;
int offset_inverse;
int dimm_map_index;
int map_val;
if (mc_type == MC_TYPE_JBUS) {
struct jbusmc_obp_mem_layout *p = _prop;
/* JBUS */
cache_line_offset = qword_code;
offset_inverse = (JBUS_LAST_BIT - cache_line_offset);
dimm_map_index = offset_inverse / 8;
map_val = p->map.dimm_map[dimm_map_index];
map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1);
*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
*pin_p = p->map.pin_map[cache_line_offset];
} else {
struct chmc_obp_mem_layout *p = _prop;
struct chmc_obp_map *mp;
int qword;
/* Safari */
if (p->symmetric)
mp = &p->map[0];
else
mp = &p->map[1];
qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES;
cache_line_offset = ((3 - qword) * QW_BITS) + qword_code;
offset_inverse = (SAFARI_LAST_BIT - cache_line_offset);
dimm_map_index = offset_inverse >> 2;
map_val = mp->dimm_map[dimm_map_index];
map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3);
*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
*pin_p = mp->pin_map[cache_line_offset];
}
}
static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr)
{
struct jbusmc *p;
list_for_each_entry(p, &mctrl_list, list) {
int i;
for (i = 0; i < p->num_dimm_groups; i++) {
struct jbusmc_dimm_group *dp = &p->dimm_groups[i];
if (phys_addr < dp->base_addr ||
(dp->base_addr + dp->size) <= phys_addr)
continue;
return dp;
}
}
return NULL;
}
static int jbusmc_print_dimm(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
struct jbusmc_obp_mem_layout *prop;
struct jbusmc_dimm_group *dp;
struct jbusmc *p;
int first_dimm;
dp = jbusmc_find_dimm_group(phys_addr);
if (dp == NULL ||
syndrome_code < SYNDROME_MIN ||
syndrome_code > SYNDROME_MAX) {
buf[0] = '?';
buf[1] = '?';
buf[2] = '?';
buf[3] = '\0';
return 0;
}
p = dp->controller;
prop = &p->layout;
first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP;
if (syndrome_code != SYNDROME_MIN) {
char *dimm_str;
int pin;
get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
&dimm_str, prop, first_dimm);
sprintf(buf, "%s, pin %3d", dimm_str, pin);
} else {
int dimm;
/* Multi-bit error, we just dump out all the
* dimm labels associated with this dimm group.
*/
for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) {
sprintf(buf, "%s ",
prop->dimm_labels[first_dimm + dimm]);
buf += strlen(buf);
}
}
return 0;
}
static u64 __devinit jbusmc_dimm_group_size(u64 base,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
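/* a JBUS DIMM group decodes at most 8GB; find how much of that
* range is actually backed by memory */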
u64 max = base + (8UL * 1024 * 1024 * 1024);
u64 max_seen = base;
int i;
for (i = 0; i < num_mem_regs; i++) {
const struct linux_prom64_registers *ent;
u64 this_base;
u64 this_end;
ent = &mem_regs[i];
this_base = ent->phys_addr;
this_end = this_base + ent->reg_size;
if (base < this_base || base >= this_end)
continue;
if (this_end > max)
this_end = max;
if (this_end > max_seen)
max_seen = this_end;
}
return max_seen - base;
}
static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p,
unsigned long index,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
struct jbusmc_dimm_group *dp = &p->dimm_groups[index];
dp->controller = p;
dp->index = index;
dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024));
dp->base_addr += (index * (8UL * 1024 * 1024 * 1024));
dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs);
}
static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
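/* group 0 exists when DIMM1/bank0 is enabled, group 1 when DIMM2/bank2 is */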
if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) {
jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs);
p->num_dimm_groups++;
}
if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) {
jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs);
p->num_dimm_groups++;
}
}
static int __devinit jbusmc_probe(struct platform_device *op)
{
const struct linux_prom64_registers *mem_regs;
struct device_node *mem_node;
int err, len, num_mem_regs;
struct jbusmc *p;
const u32 *prop;
const void *ml;
err = -ENODEV;
mem_node = of_find_node_by_path("/memory");
if (!mem_node) {
printk(KERN_ERR PFX "Cannot find /memory node.\n");
goto out;
}
mem_regs = of_get_property(mem_node, "reg", &len);
if (!mem_regs) {
printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n");
goto out;
}
num_mem_regs = len / sizeof(*mem_regs);
err = -ENOMEM;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n");
goto out;
}
INIT_LIST_HEAD(&p->list);
err = -ENODEV;
prop = of_get_property(op->dev.of_node, "portid", &len);
if (!prop || len != 4) {
printk(KERN_ERR PFX "Cannot find portid.\n");
goto out_free;
}
p->portid = *prop;
prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len);
if (!prop || len != 8) {
printk(KERN_ERR PFX "Cannot get memory control register 1.\n");
goto out_free;
}
p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1];
err = -ENOMEM;
p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc");
if (!p->regs) {
printk(KERN_ERR PFX "Cannot map jbusmc regs.\n");
goto out_free;
}
err = -ENODEV;
ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len);
if (!ml) {
printk(KERN_ERR PFX "Cannot get memory layout property.\n");
goto out_iounmap;
}
if (p->layout_len > sizeof(p->layout)) {
printk(KERN_ERR PFX "Unexpected memory-layout size %d\n",
p->layout_len);
goto out_iounmap;
}
memcpy(&p->layout, ml, p->layout_len);
jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs);
mc_list_add(&p->list);
printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n",
op->dev.of_node->full_name);
dev_set_drvdata(&op->dev, p);
err = 0;
out:
return err;
out_iounmap:
of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
out_free:
kfree(p);
goto out;
}
/* Does BANK decode PHYS_ADDR? */
static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr)
{
unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;
/* Bank must be enabled to match. */
if (bp->valid == 0)
return 0;
/* Would BANK match upper bits? */
upper_bits ^= bp->um; /* What bits are different? */
upper_bits = ~upper_bits; /* Invert. */
upper_bits |= bp->uk; /* What bits don't matter for matching? */
upper_bits = ~upper_bits; /* Invert. */
if (upper_bits)
return 0;
/* Would BANK match lower bits? */
lower_bits ^= bp->lm; /* What bits are different? */
lower_bits = ~lower_bits; /* Invert. */
lower_bits |= bp->lk; /* What bits don't matter for matching? */
lower_bits = ~lower_bits; /* Invert. */
if (lower_bits)
return 0;
/* I always knew you'd be the one. */
return 1;
}
/* Given PHYS_ADDR, search memory controller banks for a match. */
static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr)
{
struct chmc *p;
list_for_each_entry(p, &mctrl_list, list) {
int bank_no;
for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) {
struct chmc_bank_info *bp;
bp = &p->logical_banks[bank_no];
if (chmc_bank_match(bp, phys_addr))
return bp;
}
}
return NULL;
}
/* This is the main purpose of this driver. */
static int chmc_print_dimm(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
struct chmc_bank_info *bp;
struct chmc_obp_mem_layout *prop;
int bank_in_controller, first_dimm;
bp = chmc_find_bank(phys_addr);
if (bp == NULL ||
syndrome_code < SYNDROME_MIN ||
syndrome_code > SYNDROME_MAX) {
buf[0] = '?';
buf[1] = '?';
buf[2] = '?';
buf[3] = '\0';
return 0;
}
prop = &bp->p->layout_prop;
bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
first_dimm *= CHMCTRL_NDIMMS;
if (syndrome_code != SYNDROME_MIN) {
char *dimm_str;
int pin;
get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
&dimm_str, prop, first_dimm);
sprintf(buf, "%s, pin %3d", dimm_str, pin);
} else {
int dimm;
/* Multi-bit error, we just dump out all the
* dimm labels associated with this bank.
*/
for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
sprintf(buf, "%s ",
prop->dimm_labels[first_dimm + dimm]);
buf += strlen(buf);
}
}
return 0;
}
/* Accessing the registers is slightly complicated. If you want
* to get at the memory controller which is on the same processor
* the code is executing, you must use special ASI load/store else
* you go through the global mapping.
*/
static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset)
{
unsigned long ret, this_cpu;
preempt_disable();
this_cpu = real_hard_smp_processor_id();
if (p->portid == this_cpu) {
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (offset), "i" (ASI_MCU_CTRL_REG));
} else {
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (p->regs + offset),
"i" (ASI_PHYS_BYPASS_EC_E));
}
preempt_enable();
return ret;
}
#if 0 /* currently unused */
static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val)
{
if (p->portid == smp_processor_id()) {
__asm__ __volatile__("stxa %0, [%1] %2"
: : "r" (val),
"r" (offset), "i" (ASI_MCU_CTRL_REG));
} else {
__asm__ __volatile__("ldxa %0, [%1] %2"
: : "r" (val),
"r" (p->regs + offset),
"i" (ASI_PHYS_BYPASS_EC_E));
}
}
#endif
static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val)
{
struct chmc_bank_info *bp = &p->logical_banks[which_bank];
bp->p = p;
bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank;
bp->raw_reg = val;
bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;
bp->base = (bp->um);
bp->base &= ~(bp->uk);
bp->base <<= PA_UPPER_BITS_SHIFT;
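/* the lower-mask (lk) value encodes the bank interleave factor */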
switch(bp->lk) {
case 0xf:
default:
bp->interleave = 1;
break;
case 0xe:
bp->interleave = 2;
break;
case 0xc:
bp->interleave = 4;
break;
case 0x8:
bp->interleave = 8;
break;
case 0x0:
bp->interleave = 16;
break;
}
/* UK[10] is reserved, and UK[11] is not set for the SDRAM
* bank size definition.
*/
bp->size = (((unsigned long)bp->uk &
((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
bp->size /= bp->interleave;
}
static void chmc_fetch_decode_regs(struct chmc *p)
{
if (p->layout_size == 0)
return;
chmc_interpret_one_decode_reg(p, 0,
chmc_read_mcreg(p, CHMCTRL_DECODE1));
chmc_interpret_one_decode_reg(p, 1,
chmc_read_mcreg(p, CHMCTRL_DECODE2));
chmc_interpret_one_decode_reg(p, 2,
chmc_read_mcreg(p, CHMCTRL_DECODE3));
chmc_interpret_one_decode_reg(p, 3,
chmc_read_mcreg(p, CHMCTRL_DECODE4));
}
static int __devinit chmc_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
unsigned long ver;
const void *pval;
int len, portid;
struct chmc *p;
int err;
err = -ENODEV;
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID)
goto out;
portid = of_getintprop_default(dp, "portid", -1);
if (portid == -1)
goto out;
pval = of_get_property(dp, "memory-layout", &len);
if (pval && len > sizeof(p->layout_prop)) {
printk(KERN_ERR PFX "Unexpected memory-layout property "
"size %d.\n", len);
goto out;
}
err = -ENOMEM;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Could not allocate struct chmc.\n");
goto out;
}
p->portid = portid;
p->layout_size = len;
if (!pval)
p->layout_size = 0;
else
memcpy(&p->layout_prop, pval, len);
p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc");
if (!p->regs) {
printk(KERN_ERR PFX "Could not map registers.\n");
goto out_free;
}
if (p->layout_size != 0UL) {
p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1);
p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2);
p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3);
p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4);
p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL);
}
chmc_fetch_decode_regs(p);
mc_list_add(&p->list);
printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n",
dp->full_name,
(p->layout_size ? "ACTIVE" : "INACTIVE"));
dev_set_drvdata(&op->dev, p);
err = 0;
out:
return err;
out_free:
kfree(p);
goto out;
}
static int __devinit us3mc_probe(struct platform_device *op)
{
if (mc_type == MC_TYPE_SAFARI)
return chmc_probe(op);
else if (mc_type == MC_TYPE_JBUS)
return jbusmc_probe(op);
return -ENODEV;
}
static void __devexit chmc_destroy(struct platform_device *op, struct chmc *p)
{
list_del(&p->list);
of_iounmap(&op->resource[0], p->regs, 0x48);
kfree(p);
}
static void __devexit jbusmc_destroy(struct platform_device *op, struct jbusmc *p)
{
mc_list_del(&p->list);
of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
kfree(p);
}
static int __devexit us3mc_remove(struct platform_device *op)
{
void *p = dev_get_drvdata(&op->dev);
if (p) {
if (mc_type == MC_TYPE_SAFARI)
chmc_destroy(op, p);
else if (mc_type == MC_TYPE_JBUS)
jbusmc_destroy(op, p);
}
return 0;
}
static const struct of_device_id us3mc_match[] = {
{
.name = "memory-controller",
},
{},
};
MODULE_DEVICE_TABLE(of, us3mc_match);
static struct platform_driver us3mc_driver = {
.driver = {
.name = "us3mc",
.owner = THIS_MODULE,
.of_match_table = us3mc_match,
},
.probe = us3mc_probe,
.remove = __devexit_p(us3mc_remove),
};
static inline bool us3mc_platform(void)
{
if (tlb_type == cheetah || tlb_type == cheetah_plus)
return true;
return false;
}
static int __init us3mc_init(void)
{
unsigned long ver;
int ret;
if (!us3mc_platform())
return -ENODEV;
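/* Jalapeno/Serrano CPUs use the JBUS memory controller, all other
* cheetah-class CPUs the Safari one */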
__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID) {
mc_type = MC_TYPE_JBUS;
us3mc_dimm_printer = jbusmc_print_dimm;
} else {
mc_type = MC_TYPE_SAFARI;
us3mc_dimm_printer = chmc_print_dimm;
}
ret = register_dimm_printer(us3mc_dimm_printer);
if (!ret) {
ret = platform_driver_register(&us3mc_driver);
if (ret)
unregister_dimm_printer(us3mc_dimm_printer);
}
return ret;
}
static void __exit us3mc_cleanup(void)
{
if (us3mc_platform()) {
unregister_dimm_printer(us3mc_dimm_printer);
platform_driver_unregister(&us3mc_driver);
}
}
module_init(us3mc_init);
module_exit(us3mc_cleanup);
| gpl-2.0 |
simar7/singhdroid | kernel/fs/jffs2/compr_zlib.c | 7692 | 5619 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#if !defined(__KERNEL__) && !defined(__ECOS)
#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include "nodelist.h"
#include "compr.h"
/* Plan: call deflate() with avail_in == *sourcelen,
avail_out = *dstlen - 12 and flush == Z_FINISH.
If it doesn't manage to finish, call it again with
avail_in == 0 and avail_out set to the remaining 12
bytes for it to clean up.
Q: Is 12 bytes sufficient?
*/
#define STREAM_END_SPACE 12
static DEFINE_MUTEX(deflate_mutex);
static DEFINE_MUTEX(inflate_mutex);
static z_stream inf_strm, def_strm;
#ifdef __KERNEL__ /* Linux-only */
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mutex.h>
static int __init alloc_workspaces(void)
{
def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
MAX_MEM_LEVEL));
if (!def_strm.workspace)
return -ENOMEM;
jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n",
zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
if (!inf_strm.workspace) {
vfree(def_strm.workspace);
return -ENOMEM;
}
jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n",
zlib_inflate_workspacesize());
return 0;
}
static void free_workspaces(void)
{
vfree(def_strm.workspace);
vfree(inf_strm.workspace);
}
#else
#define alloc_workspaces() (0)
#define free_workspaces() do { } while(0)
#endif /* __KERNEL__ */
static int jffs2_zlib_compress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen)
{
int ret;
if (*dstlen <= STREAM_END_SPACE)
return -1;
mutex_lock(&deflate_mutex);
if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
pr_warn("deflateInit failed\n");
mutex_unlock(&deflate_mutex);
return -1;
}
def_strm.next_in = data_in;
def_strm.total_in = 0;
def_strm.next_out = cpage_out;
def_strm.total_out = 0;
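/* feed the input in slices no larger than the remaining output space,
keeping STREAM_END_SPACE bytes free for the final Z_FINISH */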
while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n",
def_strm.avail_in, def_strm.avail_out);
ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
def_strm.avail_in, def_strm.avail_out,
def_strm.total_in, def_strm.total_out);
if (ret != Z_OK) {
jffs2_dbg(1, "deflate in loop returned %d\n", ret);
zlib_deflateEnd(&def_strm);
mutex_unlock(&deflate_mutex);
return -1;
}
}
def_strm.avail_out += STREAM_END_SPACE;
def_strm.avail_in = 0;
ret = zlib_deflate(&def_strm, Z_FINISH);
zlib_deflateEnd(&def_strm);
if (ret != Z_STREAM_END) {
jffs2_dbg(1, "final deflate returned %d\n", ret);
ret = -1;
goto out;
}
if (def_strm.total_out >= def_strm.total_in) {
jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n",
def_strm.total_in, def_strm.total_out);
ret = -1;
goto out;
}
jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n",
def_strm.total_in, def_strm.total_out);
*dstlen = def_strm.total_out;
*sourcelen = def_strm.total_in;
ret = 0;
out:
mutex_unlock(&deflate_mutex);
return ret;
}
static int jffs2_zlib_decompress(unsigned char *data_in,
unsigned char *cpage_out,
uint32_t srclen, uint32_t destlen)
{
int ret;
int wbits = MAX_WBITS;
mutex_lock(&inflate_mutex);
inf_strm.next_in = data_in;
inf_strm.avail_in = srclen;
inf_strm.total_in = 0;
inf_strm.next_out = cpage_out;
inf_strm.avail_out = destlen;
inf_strm.total_out = 0;
/* If it's deflate, and it's got no preset dictionary, then
we can tell zlib to skip the adler32 check. */
if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
((data_in[0] & 0x0f) == Z_DEFLATED) &&
!(((data_in[0]<<8) + data_in[1]) % 31)) {
jffs2_dbg(2, "inflate skipping adler32\n");
wbits = -((data_in[0] >> 4) + 8);
inf_strm.next_in += 2;
inf_strm.avail_in -= 2;
} else {
/* Let this remain D1 for now -- it should never happen */
jffs2_dbg(1, "inflate not skipping adler32\n");
}
if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
pr_warn("inflateInit failed\n");
mutex_unlock(&inflate_mutex);
return 1;
}
while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
;
if (ret != Z_STREAM_END) {
pr_notice("inflate returned %d\n", ret);
}
zlib_inflateEnd(&inf_strm);
mutex_unlock(&inflate_mutex);
return 0;
}
static struct jffs2_compressor jffs2_zlib_comp = {
.priority = JFFS2_ZLIB_PRIORITY,
.name = "zlib",
.compr = JFFS2_COMPR_ZLIB,
.compress = &jffs2_zlib_compress,
.decompress = &jffs2_zlib_decompress,
#ifdef JFFS2_ZLIB_DISABLED
.disabled = 1,
#else
.disabled = 0,
#endif
};
int __init jffs2_zlib_init(void)
{
int ret;
ret = alloc_workspaces();
if (ret)
return ret;
ret = jffs2_register_compressor(&jffs2_zlib_comp);
if (ret)
free_workspaces();
return ret;
}
void jffs2_zlib_exit(void)
{
jffs2_unregister_compressor(&jffs2_zlib_comp);
free_workspaces();
}
| gpl-2.0 |
CyanogenMod/android_kernel_htc_msm8974 | drivers/staging/tidspbridge/dynload/reloc.c | 8460 | 14023 | /*
* reloc.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include "header.h"
#if TMS32060
/* the magic symbol for the start of BSS */
static const char bsssymbol[] = { ".bss" };
#endif
#if TMS32060
#include "reloc_table_c6000.c"
#endif
#if TMS32060
/* From coff.h - ignore these relocation operations */
#define R_C60ALIGN 0x76 /* C60: Alignment info for compressor */
#define R_C60FPHEAD 0x77 /* C60: Explicit assembly directive */
#define R_C60NOCMP 0x100 /* C60: Don't compress this code scn */
#endif
/**************************************************************************
* Procedure dload_unpack
*
* Parameters:
* data pointer to storage unit containing lowest host address of
* image data
* fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU
* offset Offset from LSB, 0 <= offset < BITS_PER_AU
* sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY)
*
* Effect:
* Extracts the specified field and returns it.
************************************************************************* */
rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t * data, int fieldsz,
int offset, unsigned sgn)
{
register rvalue objval;
register int shift, direction;
register tgt_au_t *dp = data;
fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */
/* * collect up enough bits to contain the desired field */
if (TARGET_BIG_ENDIAN) {
dp += (fieldsz + offset) >> LOG_TGTAU_BITS;
direction = -1;
} else
direction = 1;
objval = *dp >> offset;
shift = TGTAU_BITS - offset;
while (shift <= fieldsz) {
dp += direction;
objval += (rvalue) *dp << shift;
shift += TGTAU_BITS;
}
/* * sign or zero extend the value appropriately */
if (sgn == ROP_UNS)
objval &= (2 << fieldsz) - 1;
else {
shift = sizeof(rvalue) * BITS_PER_AU - 1 - fieldsz;
objval = (objval << shift) >> shift;
}
return objval;
} /* dload_unpack */
/**************************************************************************
* Procedure dload_repack
*
* Parameters:
* val Value to insert
* data Pointer to storage unit containing lowest host address of
* image data
* fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU
* offset Offset from LSB, 0 <= offset < BITS_PER_AU
* sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY)
*
* Effect:
* Stuffs the specified value in the specified field. Returns 0 for
* success
* or 1 if the value will not fit in the specified field according to the
* specified signedness rule.
************************************************************************* */
static const unsigned char ovf_limit[] = { 1, 2, 2 };
int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t * data,
int fieldsz, int offset, unsigned sgn)
{
register urvalue objval, mask;
register int shift, direction;
register tgt_au_t *dp = data;
fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */
/* clip the bits */
mask = (2UL << fieldsz) - 1;
objval = (val & mask);
/* * store the bits through the specified mask */
if (TARGET_BIG_ENDIAN) {
dp += (fieldsz + offset) >> LOG_TGTAU_BITS;
direction = -1;
} else
direction = 1;
/* insert LSBs */
*dp = (*dp & ~(mask << offset)) + (objval << offset);
shift = TGTAU_BITS - offset;
/* align mask and objval with AU boundary */
objval >>= shift;
mask >>= shift;
while (mask) {
dp += direction;
*dp = (*dp & ~mask) + objval;
objval >>= TGTAU_BITS;
mask >>= TGTAU_BITS;
}
/*
* check for overflow
*/
if (sgn) {
unsigned tmp = (val >> fieldsz) + (sgn & 0x1);
if (tmp > ovf_limit[sgn - 1])
return 1;
}
return 0;
} /* dload_repack */
/* lookup table for the scaling amount in a C6x instruction */
#if TMS32060
#define SCALE_BITS 4 /* there are 4 bits in the scale field */
#define SCALE_MASK 0x7 /* we really only use the bottom 3 bits */
static const u8 c60_scale[SCALE_MASK + 1] = {
1, 0, 0, 0, 1, 1, 2, 2
};
#endif
/**************************************************************************
* Procedure dload_relocate
*
* Parameters:
* data Pointer to base of image data
* rp Pointer to relocation operation
*
* Effect:
* Performs the specified relocation operation
************************************************************************* */
void dload_relocate(struct dload_state *dlthis, tgt_au_t * data,
struct reloc_record_t *rp, bool *tramps_generated,
bool second_pass)
{
rvalue val, reloc_amt, orig_val = 0;
unsigned int fieldsz = 0;
unsigned int offset = 0;
unsigned int reloc_info = 0;
unsigned int reloc_action = 0;
register int rx = 0;
rvalue *stackp = NULL;
int top;
struct local_symbol *svp = NULL;
#ifdef RFV_SCALE
unsigned int scale = 0;
#endif
struct image_packet_t *img_pkt = NULL;
/* The image packet data struct is only used during first pass
* relocation in the event that a trampoline is needed. 2nd pass
* relocation doesn't guarantee that data is coming from an
* image_packet_t structure. See cload.c, dload_data for how img_data is
* set. If that changes this needs to be updated!!! */
if (second_pass == false)
img_pkt = (struct image_packet_t *)((u8 *) data -
sizeof(struct
image_packet_t));
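/* look up the relocation operator for rp->TYPE in the hashed tables
* from reloc_table_c6000.c */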
rx = HASH_FUNC(rp->TYPE);
while (rop_map1[rx] != rp->TYPE) {
rx = HASH_L(rop_map2[rx]);
if (rx < 0) {
#if TMS32060
switch (rp->TYPE) {
case R_C60ALIGN:
case R_C60NOCMP:
case R_C60FPHEAD:
/* Ignore these reloc types and return */
break;
default:
/* Unknown reloc type, print error and return */
dload_error(dlthis, "Bad coff operator 0x%x",
rp->TYPE);
}
#else
dload_error(dlthis, "Bad coff operator 0x%x", rp->TYPE);
#endif
return;
}
}
rx = HASH_I(rop_map2[rx]);
if ((rx < (sizeof(rop_action) / sizeof(u16)))
&& (rx < (sizeof(rop_info) / sizeof(u16))) && (rx > 0)) {
reloc_action = rop_action[rx];
reloc_info = rop_info[rx];
} else {
dload_error(dlthis, "Buffer Overflow - Array Index Out "
"of Bounds");
}
/* Compute the relocation amount for the referenced symbol, if any */
reloc_amt = rp->UVAL;
if (RFV_SYM(reloc_info)) { /* relocation uses a symbol reference */
/* If this is first pass, use the module local symbol table,
* else use the trampoline symbol table. */
if (second_pass == false) {
if ((u32) rp->SYMNDX < dlthis->dfile_hdr.df_no_syms) {
/* real symbol reference */
svp = &dlthis->local_symtab[rp->SYMNDX];
reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ?
svp->delta : svp->value;
}
/* reloc references current section */
else if (rp->SYMNDX == -1) {
reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ?
dlthis->delta_runaddr :
dlthis->image_secn->run_addr;
}
}
}
/* relocation uses a symbol reference */
/* Handle stack adjustment */
val = 0;
top = RFV_STK(reloc_info);
if (top) {
top += dlthis->relstkidx - RSTK_UOP;
if (top >= STATIC_EXPR_STK_SIZE) {
dload_error(dlthis,
"Expression stack overflow in %s at offset "
FMT_UI32, dlthis->image_secn->name,
rp->vaddr + dlthis->image_offset);
return;
}
val = dlthis->relstk[dlthis->relstkidx];
dlthis->relstkidx = top;
stackp = &dlthis->relstk[top];
}
/* Derive field position and size, if we need them */
if (reloc_info & ROP_RW) { /* read or write action in our future */
fieldsz = RFV_WIDTH(reloc_action);
if (fieldsz) { /* field info from table */
offset = RFV_POSN(reloc_action);
if (TARGET_BIG_ENDIAN)
/* make sure vaddr is the lowest target
* address containing bits */
rp->vaddr += RFV_BIGOFF(reloc_info);
} else { /* field info from relocation op */
fieldsz = rp->FIELDSZ;
offset = rp->OFFSET;
if (TARGET_BIG_ENDIAN)
/* make sure vaddr is the lowest target
address containing bits */
rp->vaddr += (rp->WORDSZ - offset - fieldsz)
>> LOG_TARGET_AU_BITS;
}
data = (tgt_au_t *) ((char *)data + TADDR_TO_HOST(rp->vaddr));
/* compute lowest host location of referenced data */
#if BITS_PER_AU > TARGET_AU_BITS
/* conversion from target address to host address may lose
address bits; add loss to offset */
if (TARGET_BIG_ENDIAN) {
offset += -((rp->vaddr << LOG_TARGET_AU_BITS) +
offset + fieldsz) &
(BITS_PER_AU - TARGET_AU_BITS);
} else {
offset += (rp->vaddr << LOG_TARGET_AU_BITS) &
(BITS_PER_AU - 1);
}
#endif
#ifdef RFV_SCALE
scale = RFV_SCALE(reloc_info);
#endif
}
/* read the object value from the current image, if so ordered */
if (reloc_info & ROP_R) {
/* relocation reads current image value */
val = dload_unpack(dlthis, data, fieldsz, offset,
RFV_SIGN(reloc_info));
/* Save off the original value in case the relo overflows and
* we can trampoline it. */
orig_val = val;
#ifdef RFV_SCALE
val <<= scale;
#endif
}
/* perform the necessary arithmetic */
switch (RFV_ACTION(reloc_action)) { /* relocation actions */
case RACT_VAL:
break;
case RACT_ASGN:
val = reloc_amt;
break;
case RACT_ADD:
val += reloc_amt;
break;
case RACT_PCR:
/*-----------------------------------------------------------
* Handle special cases of jumping from absolute sections
* (special reloc type) or to absolute destination
* (symndx == -1). In either case, set the appropriate
* relocation amount to 0.
*----------------------------------------------------------- */
if (rp->SYMNDX == -1)
reloc_amt = 0;
val += reloc_amt - dlthis->delta_runaddr;
break;
case RACT_ADDISP:
val += rp->R_DISP + reloc_amt;
break;
case RACT_ASGPC:
val = dlthis->image_secn->run_addr + reloc_amt;
break;
case RACT_PLUS:
if (stackp != NULL)
val += *stackp;
break;
case RACT_SUB:
if (stackp != NULL)
val = *stackp - val;
break;
case RACT_NEG:
val = -val;
break;
case RACT_MPY:
if (stackp != NULL)
val *= *stackp;
break;
case RACT_DIV:
if (stackp != NULL)
val = *stackp / val;
break;
case RACT_MOD:
if (stackp != NULL)
val = *stackp % val;
break;
case RACT_SR:
if (val >= sizeof(rvalue) * BITS_PER_AU)
val = 0;
else if (stackp != NULL)
val = (urvalue) *stackp >> val;
break;
case RACT_ASR:
if (val >= sizeof(rvalue) * BITS_PER_AU)
val = sizeof(rvalue) * BITS_PER_AU - 1;
else if (stackp != NULL)
val = *stackp >> val;
break;
case RACT_SL:
if (val >= sizeof(rvalue) * BITS_PER_AU)
val = 0;
else if (stackp != NULL)
val = *stackp << val;
break;
case RACT_AND:
if (stackp != NULL)
val &= *stackp;
break;
case RACT_OR:
if (stackp != NULL)
val |= *stackp;
break;
case RACT_XOR:
if (stackp != NULL)
val ^= *stackp;
break;
case RACT_NOT:
val = ~val;
break;
#if TMS32060
case RACT_C6SECT:
/* actually needed address of secn containing symbol */
if (svp != NULL) {
if (rp->SYMNDX >= 0)
if (svp->secnn > 0)
reloc_amt = dlthis->ldr_sections
[svp->secnn - 1].run_addr;
}
/* !!! FALL THRU !!! */
case RACT_C6BASE:
if (dlthis->bss_run_base == 0) {
struct dynload_symbol *symp;
symp = dlthis->mysym->find_matching_symbol
(dlthis->mysym, bsssymbol);
/* lookup value of global BSS base */
if (symp)
dlthis->bss_run_base = symp->value;
else
dload_error(dlthis,
"Global BSS base referenced in %s "
"offset" FMT_UI32 " but not "
"defined",
dlthis->image_secn->name,
rp->vaddr + dlthis->image_offset);
}
reloc_amt -= dlthis->bss_run_base;
/* !!! FALL THRU !!! */
case RACT_C6DSPL:
/* scale factor determined by 3 LSBs of field */
scale = c60_scale[val & SCALE_MASK];
offset += SCALE_BITS;
fieldsz -= SCALE_BITS;
val >>= SCALE_BITS; /* ignore the scale field hereafter */
val <<= scale;
val += reloc_amt; /* do the usual relocation */
if (((1 << scale) - 1) & val)
dload_error(dlthis,
"Unaligned reference in %s offset "
FMT_UI32, dlthis->image_secn->name,
rp->vaddr + dlthis->image_offset);
break;
#endif
} /* relocation actions */
/* Put back result as required */
if (reloc_info & ROP_W) { /* relocation writes image value */
#ifdef RFV_SCALE
val >>= scale;
#endif
if (dload_repack(dlthis, val, data, fieldsz, offset,
RFV_SIGN(reloc_info))) {
/* Check to see if this relo can be trampolined,
* but only in first phase relocation. 2nd phase
* relocation cannot trampoline. */
if ((second_pass == false) &&
(dload_tramp_avail(dlthis, rp) == true)) {
/* Before generating the trampoline, restore
* the value to its original so the 2nd pass
* relo will work. */
dload_repack(dlthis, orig_val, data, fieldsz,
offset, RFV_SIGN(reloc_info));
if (!dload_tramp_generate(dlthis,
(dlthis->image_secn -
dlthis->ldr_sections),
dlthis->image_offset,
img_pkt, rp)) {
dload_error(dlthis,
"Failed to "
"generate trampoline for "
"bit overflow");
dload_error(dlthis,
"Relocation val " FMT_UI32
" overflows %d bits in %s "
"offset " FMT_UI32, val,
fieldsz,
dlthis->image_secn->name,
dlthis->image_offset +
rp->vaddr);
} else
*tramps_generated = true;
} else {
dload_error(dlthis, "Relocation value "
FMT_UI32 " overflows %d bits in %s"
" offset " FMT_UI32, val, fieldsz,
dlthis->image_secn->name,
dlthis->image_offset + rp->vaddr);
}
}
} else if (top)
*stackp = val;
} /* reloc_value */
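/*
 * Worked example of the common path above (hedged: field width and symbol
 * value are made up for illustration). For a RACT_ADD relocation against a
 * 16-bit field at offset 0:
 *   1. dload_unpack() reads the current field value, say 0x0010;
 *   2. reloc_amt is the referenced symbol's value (or delta), say 0x4000;
 *   3. val = 0x0010 + 0x4000 = 0x4010;
 *   4. dload_repack() writes 0x4010 back; if the result no longer fits the
 *      field, first-pass relocation may fall back to generating a trampoline.
 */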
| gpl-2.0 |
roggin/iconia-a500-kernel | drivers/scsi/scsi_transport_srp.c | 9996 | 10394 | /*
* SCSI RDMA (SRP) transport class
*
* Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_transport_srp_internal.h"
struct srp_host_attrs {
atomic_t next_port_id;
};
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 2
struct srp_internal {
struct scsi_transport_template t;
struct srp_function_template *f;
struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];
struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
struct device_attribute private_rport_attrs[SRP_RPORT_ATTRS];
struct transport_container rport_attr_cont;
};
#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static int srp_host_setup(struct transport_container *tc, struct device *dev,
struct device *cdev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
atomic_set(&srp_host->next_port_id, 0);
return 0;
}
static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
NULL, NULL);
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
#define SETUP_TEMPLATE(attrb, field, perm, test, ro_test, ro_perm) \
i->private_##attrb[count] = dev_attr_##field; \
i->private_##attrb[count].attr.mode = perm; \
if (ro_test) { \
i->private_##attrb[count].attr.mode = ro_perm; \
i->private_##attrb[count].store = NULL; \
} \
i->attrb[count] = &i->private_##attrb[count]; \
if (test) \
count++
#define SETUP_RPORT_ATTRIBUTE_RD(field) \
SETUP_TEMPLATE(rport_attrs, field, S_IRUGO, 1, 0, 0)
#define SETUP_RPORT_ATTRIBUTE_RW(field) \
SETUP_TEMPLATE(rport_attrs, field, S_IRUGO | S_IWUSR, \
1, 1, S_IRUGO)
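/*
 * For reference, a hand expansion of SETUP_RPORT_ATTRIBUTE_RD(port_id) with
 * count == 0 reads roughly as below. This is only an illustration of the
 * macros above, not additional code:
 *
 *	i->private_rport_attrs[0] = dev_attr_port_id;
 *	i->private_rport_attrs[0].attr.mode = S_IRUGO;
 *	if (0) {				<-- ro_test
 *		i->private_rport_attrs[0].attr.mode = 0;
 *		i->private_rport_attrs[0].store = NULL;
 *	}
 *	i->rport_attrs[0] = &i->private_rport_attrs[0];
 *	if (1)					<-- test
 *		count++;
 */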
#define SRP_PID(p) \
(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
(p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
(p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_rport *rport = transport_class_to_srp_rport(dev);
return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
}
static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
static const struct {
u32 value;
char *name;
} srp_rport_role_names[] = {
{SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
{SRP_RPORT_ROLE_TARGET, "SRP Target"},
};
static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_rport *rport = transport_class_to_srp_rport(dev);
int i;
char *name = NULL;
for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
if (srp_rport_role_names[i].value == rport->roles) {
name = srp_rport_role_names[i].name;
break;
}
return sprintf(buf, "%s\n", name ? : "unknown");
}
static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);
static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
put_device(dev->parent);
kfree(rport);
}
static int scsi_is_srp_rport(const struct device *dev)
{
return dev->release == srp_rport_release;
}
static int srp_rport_match(struct attribute_container *cont,
struct device *dev)
{
struct Scsi_Host *shost;
struct srp_internal *i;
if (!scsi_is_srp_rport(dev))
return 0;
shost = dev_to_shost(dev->parent);
if (!shost->transportt)
return 0;
if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
return 0;
i = to_srp_internal(shost->transportt);
return &i->rport_attr_cont.ac == cont;
}
static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
struct Scsi_Host *shost;
struct srp_internal *i;
if (!scsi_is_host_device(dev))
return 0;
shost = dev_to_shost(dev);
if (!shost->transportt)
return 0;
if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
return 0;
i = to_srp_internal(shost->transportt);
return &i->t.host_attrs.ac == cont;
}
/**
* srp_rport_add - add a SRP remote port to the device hierarchy
* @shost: scsi host the remote port is connected to.
* @ids: The port id for the remote port.
*
* Publishes a port to the rest of the system.
*/
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
struct srp_rport_identifiers *ids)
{
struct srp_rport *rport;
struct device *parent = &shost->shost_gendev;
int id, ret;
rport = kzalloc(sizeof(*rport), GFP_KERNEL);
if (!rport)
return ERR_PTR(-ENOMEM);
device_initialize(&rport->dev);
rport->dev.parent = get_device(parent);
rport->dev.release = srp_rport_release;
memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
rport->roles = ids->roles;
id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
transport_setup_device(&rport->dev);
ret = device_add(&rport->dev);
if (ret) {
transport_destroy_device(&rport->dev);
put_device(&rport->dev);
return ERR_PTR(ret);
}
if (shost->active_mode & MODE_TARGET &&
ids->roles == SRP_RPORT_ROLE_INITIATOR) {
ret = srp_tgt_it_nexus_create(shost, (unsigned long)rport,
rport->port_id);
if (ret) {
device_del(&rport->dev);
transport_destroy_device(&rport->dev);
put_device(&rport->dev);
return ERR_PTR(ret);
}
}
transport_add_device(&rport->dev);
transport_configure_device(&rport->dev);
return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
/**
* srp_rport_del - remove a SRP remote port
* @rport: SRP remote port to remove
*
* Removes the specified SRP remote port.
*/
void srp_rport_del(struct srp_rport *rport)
{
struct device *dev = &rport->dev;
struct Scsi_Host *shost = dev_to_shost(dev->parent);
if (shost->active_mode & MODE_TARGET &&
rport->roles == SRP_RPORT_ROLE_INITIATOR)
srp_tgt_it_nexus_destroy(shost, (unsigned long)rport);
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);
static int do_srp_rport_del(struct device *dev, void *data)
{
if (scsi_is_srp_rport(dev))
srp_rport_del(dev_to_rport(dev));
return 0;
}
/**
* srp_remove_host - tear down a Scsi_Host's SRP data structures
* @shost: Scsi Host that is torn down
*
* Removes all SRP remote ports for a given Scsi_Host.
* Must be called just before scsi_remove_host for SRP HBAs.
*/
void srp_remove_host(struct Scsi_Host *shost)
{
device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);
static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
int result)
{
struct srp_internal *i = to_srp_internal(shost->transportt);
return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
}
static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
{
struct srp_internal *i = to_srp_internal(shost->transportt);
return i->f->it_nexus_response(shost, nexus, result);
}
/**
* srp_attach_transport - instantiate SRP transport template
* @ft: SRP transport class function template
*/
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
int count;
struct srp_internal *i;
i = kzalloc(sizeof(*i), GFP_KERNEL);
if (!i)
return NULL;
i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
i->t.it_nexus_response = srp_it_nexus_response;
i->t.host_size = sizeof(struct srp_host_attrs);
i->t.host_attrs.ac.attrs = &i->host_attrs[0];
i->t.host_attrs.ac.class = &srp_host_class.class;
i->t.host_attrs.ac.match = srp_host_match;
i->host_attrs[0] = NULL;
transport_container_register(&i->t.host_attrs);
i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
i->rport_attr_cont.ac.class = &srp_rport_class.class;
i->rport_attr_cont.ac.match = srp_rport_match;
transport_container_register(&i->rport_attr_cont);
count = 0;
SETUP_RPORT_ATTRIBUTE_RD(port_id);
SETUP_RPORT_ATTRIBUTE_RD(roles);
i->rport_attrs[count] = NULL;
i->f = ft;
return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
/**
* srp_release_transport - release SRP transport template instance
* @t: transport template instance
*/
void srp_release_transport(struct scsi_transport_template *t)
{
struct srp_internal *i = to_srp_internal(t);
transport_container_unregister(&i->t.host_attrs);
transport_container_unregister(&i->rport_attr_cont);
kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);
static __init int srp_transport_init(void)
{
int ret;
ret = transport_class_register(&srp_host_class);
if (ret)
return ret;
ret = transport_class_register(&srp_rport_class);
if (ret)
goto unregister_host_class;
return 0;
unregister_host_class:
transport_class_unregister(&srp_host_class);
return ret;
}
static void __exit srp_transport_exit(void)
{
transport_class_unregister(&srp_host_class);
transport_class_unregister(&srp_rport_class);
}
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");
module_init(srp_transport_init);
module_exit(srp_transport_exit);
| gpl-2.0 |
aduggan/rpi-linux | arch/avr32/kernel/ptrace.c | 12044 | 9469 | /*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/unistd.h>
#include <linux/notifier.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/ocd.h>
#include <asm/mmu_context.h>
#include <linux/kdebug.h>
static struct pt_regs *get_user_regs(struct task_struct *tsk)
{
return (struct pt_regs *)((unsigned long)task_stack_page(tsk) +
THREAD_SIZE - sizeof(struct pt_regs));
}
void user_enable_single_step(struct task_struct *tsk)
{
pr_debug("user_enable_single_step: pid=%u, PC=0x%08lx, SR=0x%08lx\n",
tsk->pid, task_pt_regs(tsk)->pc, task_pt_regs(tsk)->sr);
/*
* We can't schedule in Debug mode, so when TIF_BREAKPOINT is
* set, the system call or exception handler will do a
* breakpoint to enter monitor mode before returning to
* userspace.
*
* The monitor code will then notice that TIF_SINGLE_STEP is
* set and return to userspace with single stepping enabled.
* The CPU will then enter monitor mode again after exactly
* one instruction has been executed, and the monitor code
* will then send a SIGTRAP to the process.
*/
set_tsk_thread_flag(tsk, TIF_BREAKPOINT);
set_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
}
void user_disable_single_step(struct task_struct *child)
{
/* XXX(hch): a no-op here seems wrong.. */
}
/*
* Called by kernel/ptrace.c when detaching
*
* Make sure any single step bits, etc. are not set
*/
void ptrace_disable(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLE_STEP);
clear_tsk_thread_flag(child, TIF_BREAKPOINT);
ocd_disable(child);
}
/*
* Read the word at offset "offset" into the task's "struct user". We
* actually access the pt_regs struct stored on the kernel stack.
*/
static int ptrace_read_user(struct task_struct *tsk, unsigned long offset,
unsigned long __user *data)
{
unsigned long *regs;
unsigned long value;
if (offset & 3 || offset >= sizeof(struct user)) {
printk("ptrace_read_user: invalid offset 0x%08lx\n", offset);
return -EIO;
}
regs = (unsigned long *)get_user_regs(tsk);
value = 0;
if (offset < sizeof(struct pt_regs))
value = regs[offset / sizeof(regs[0])];
pr_debug("ptrace_read_user(%s[%u], %#lx, %p) -> %#lx\n",
tsk->comm, tsk->pid, offset, data, value);
return put_user(value, data);
}
/*
* Write the word "value" to offset "offset" into the task's "struct
* user". We actually access the pt_regs struct stored on the kernel
* stack.
*/
static int ptrace_write_user(struct task_struct *tsk, unsigned long offset,
unsigned long value)
{
unsigned long *regs;
pr_debug("ptrace_write_user(%s[%u], %#lx, %#lx)\n",
tsk->comm, tsk->pid, offset, value);
if (offset & 3 || offset >= sizeof(struct user)) {
pr_debug(" invalid offset 0x%08lx\n", offset);
return -EIO;
}
if (offset >= sizeof(struct pt_regs))
return 0;
regs = (unsigned long *)get_user_regs(tsk);
regs[offset / sizeof(regs[0])] = value;
return 0;
}
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
struct pt_regs *regs = get_user_regs(tsk);
return copy_to_user(uregs, regs, sizeof(*regs)) ? -EFAULT : 0;
}
static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
{
struct pt_regs newregs;
int ret;
ret = -EFAULT;
if (copy_from_user(&newregs, uregs, sizeof(newregs)) == 0) {
struct pt_regs *regs = get_user_regs(tsk);
ret = -EINVAL;
if (valid_user_regs(&newregs)) {
*regs = newregs;
ret = 0;
}
}
return ret;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
void __user *datap = (void __user *) data;
switch (request) {
/* Read the word at location addr in the child process */
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
ret = generic_ptrace_peekdata(child, addr, data);
break;
case PTRACE_PEEKUSR:
ret = ptrace_read_user(child, addr, datap);
break;
/* Write the word in data at location addr */
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR:
ret = ptrace_write_user(child, addr, data);
break;
case PTRACE_GETREGS:
ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child, datap);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
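/*
 * Userspace-side sketch (hedged: not part of this file; the register slot
 * offsets into struct user are AVR32-specific and only shown symbolically)
 * of how the PTRACE_PEEKUSR case above is reached from a tracer:
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execl("/bin/true", "true", NULL);
 *	} else {
 *		long word;
 *		waitpid(pid, NULL, 0);		// child stops on exec
 *		// offset must be word aligned and < sizeof(struct user),
 *		// otherwise ptrace_read_user() returns -EIO
 *		word = ptrace(PTRACE_PEEKUSR, pid, (void *)0, NULL);
 *		ptrace(PTRACE_CONT, pid, NULL, NULL);
 *	}
 */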
asmlinkage void syscall_trace(void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
/* The 0x80 provides a way for the tracing parent to
* distinguish between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it
* will do for normal use. strace only continues with a
* signal if the stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
pr_debug("syscall_trace: sending signal %d to PID %u\n",
current->exit_code, current->pid);
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
/*
* debug_trampoline() is an assembly stub which will store all user
* registers on the stack and execute a breakpoint instruction.
*
* If we single-step into an exception handler which runs with
* interrupts disabled the whole time so it doesn't have to check for
* pending work, its return address will be modified so that it ends
* up returning to debug_trampoline.
*
* If the exception handler decides to store the user context and
* enable interrupts after all, it will restore the original return
* address and status register value. Before it returns, it will
* notice that TIF_BREAKPOINT is set and execute a breakpoint
* instruction.
*/
extern void debug_trampoline(void);
asmlinkage struct pt_regs *do_debug(struct pt_regs *regs)
{
struct thread_info *ti;
unsigned long trampoline_addr;
u32 status;
u32 ctrl;
int code;
status = ocd_read(DS);
ti = current_thread_info();
code = TRAP_BRKPT;
pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n",
status, regs->pc, regs->sr, ti->flags);
if (!user_mode(regs)) {
unsigned long die_val = DIE_BREAKPOINT;
if (status & (1 << OCD_DS_SSS_BIT))
die_val = DIE_SSTEP;
if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP)
== NOTIFY_STOP)
return regs;
if ((status & (1 << OCD_DS_SWB_BIT))
&& test_and_clear_ti_thread_flag(
ti, TIF_BREAKPOINT)) {
/*
* Explicit breakpoint from trampoline or
* exception/syscall/interrupt handler.
*
* The real saved regs are on the stack right
* after the ones we saved on entry.
*/
regs++;
pr_debug(" -> TIF_BREAKPOINT done, adjusted regs:"
"PC=0x%08lx SR=0x%08lx\n",
regs->pc, regs->sr);
BUG_ON(!user_mode(regs));
if (test_thread_flag(TIF_SINGLE_STEP)) {
pr_debug("Going to do single step...\n");
return regs;
}
/*
* No TIF_SINGLE_STEP means we're done
* stepping over a syscall. Do the trap now.
*/
code = TRAP_TRACE;
} else if ((status & (1 << OCD_DS_SSS_BIT))
&& test_ti_thread_flag(ti, TIF_SINGLE_STEP)) {
pr_debug("Stepped into something, "
"setting TIF_BREAKPOINT...\n");
set_ti_thread_flag(ti, TIF_BREAKPOINT);
/*
* We stepped into an exception, interrupt or
* syscall handler. Some exception handlers
* don't check for pending work, so we need to
* set up a trampoline just in case.
*
* The exception entry code will undo the
* trampoline stuff if it does a full context
* save (which also means that it'll check for
* pending work later.)
*/
if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) {
trampoline_addr
= (unsigned long)&debug_trampoline;
pr_debug("Setting up trampoline...\n");
ti->rar_saved = sysreg_read(RAR_EX);
ti->rsr_saved = sysreg_read(RSR_EX);
sysreg_write(RAR_EX, trampoline_addr);
sysreg_write(RSR_EX, (MODE_EXCEPTION
| SR_EM | SR_GM));
BUG_ON(ti->rsr_saved & MODE_MASK);
}
/*
* If we stepped into a system call, we
* shouldn't do a single step after we return
* since the return address is right after the
* "scall" instruction we were told to step
* over.
*/
if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) {
pr_debug("Supervisor; no single step\n");
clear_ti_thread_flag(ti, TIF_SINGLE_STEP);
}
ctrl = ocd_read(DC);
ctrl &= ~(1 << OCD_DC_SS_BIT);
ocd_write(DC, ctrl);
return regs;
} else {
printk(KERN_ERR "Unexpected OCD_DS value: 0x%08x\n",
status);
printk(KERN_ERR "Thread flags: 0x%08lx\n", ti->flags);
die("Unhandled debug trap in kernel mode",
regs, SIGTRAP);
}
} else if (status & (1 << OCD_DS_SSS_BIT)) {
/* Single step in user mode */
code = TRAP_TRACE;
ctrl = ocd_read(DC);
ctrl &= ~(1 << OCD_DC_SS_BIT);
ocd_write(DC, ctrl);
}
pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n",
code, regs->pc, regs->sr);
clear_thread_flag(TIF_SINGLE_STEP);
_exception(SIGTRAP, regs, code, instruction_pointer(regs));
return regs;
}
| gpl-2.0 |
s9yobena/linux | sound/core/timer_compat.c | 13836 | 3619 | /*
* 32bit -> 64bit ioctl wrapper for timer API
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* This file is included from timer.c */
#include <linux/compat.h>
struct snd_timer_info32 {
u32 flags;
s32 card;
unsigned char id[64];
unsigned char name[80];
u32 reserved0;
u32 resolution;
unsigned char reserved[64];
};
static int snd_timer_user_info_compat(struct file *file,
struct snd_timer_info32 __user *_info)
{
struct snd_timer_user *tu;
struct snd_timer_info32 info;
struct snd_timer *t;
tu = file->private_data;
if (snd_BUG_ON(!tu->timeri))
return -ENXIO;
t = tu->timeri->timer;
if (snd_BUG_ON(!t))
return -ENXIO;
memset(&info, 0, sizeof(info));
info.card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
info.flags |= SNDRV_TIMER_FLG_SLAVE;
strlcpy(info.id, t->id, sizeof(info.id));
strlcpy(info.name, t->name, sizeof(info.name));
info.resolution = t->hw.resolution;
if (copy_to_user(_info, &info, sizeof(*_info)))
return -EFAULT;
return 0;
}
struct snd_timer_status32 {
struct compat_timespec tstamp;
u32 resolution;
u32 lost;
u32 overrun;
u32 queue;
unsigned char reserved[64];
};
static int snd_timer_user_status_compat(struct file *file,
struct snd_timer_status32 __user *_status)
{
struct snd_timer_user *tu;
struct snd_timer_status status;
tu = file->private_data;
if (snd_BUG_ON(!tu->timeri))
return -ENXIO;
memset(&status, 0, sizeof(status));
status.tstamp = tu->tstamp;
status.resolution = snd_timer_resolution(tu->timeri);
status.lost = tu->timeri->lost;
status.overrun = tu->overrun;
spin_lock_irq(&tu->qlock);
status.queue = tu->qused;
spin_unlock_irq(&tu->qlock);
if (copy_to_user(_status, &status, sizeof(status)))
return -EFAULT;
return 0;
}
/*
*/
enum {
SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
};
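/*
 * Why these get their own numbers: _IOR()/_IOW() fold sizeof(arg) into the
 * command value, and the 32-bit layouts above differ in size from the
 * native structs, so a 32-bit process asking for timer info computes the
 * INFO32 value rather than the native SNDRV_TIMER_IOCTL_INFO. Roughly
 * (illustrative only; the exact constants depend on the ABI):
 *
 *	_IOR('T', 0x11, struct snd_timer_info32)
 *	    == _IOC(_IOC_READ, 'T', 0x11, sizeof(struct snd_timer_info32))
 */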
static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = compat_ptr(arg);
switch (cmd) {
case SNDRV_TIMER_IOCTL_PVERSION:
case SNDRV_TIMER_IOCTL_TREAD:
case SNDRV_TIMER_IOCTL_GINFO:
case SNDRV_TIMER_IOCTL_GPARAMS:
case SNDRV_TIMER_IOCTL_GSTATUS:
case SNDRV_TIMER_IOCTL_SELECT:
case SNDRV_TIMER_IOCTL_PARAMS:
case SNDRV_TIMER_IOCTL_START:
case SNDRV_TIMER_IOCTL_START_OLD:
case SNDRV_TIMER_IOCTL_STOP:
case SNDRV_TIMER_IOCTL_STOP_OLD:
case SNDRV_TIMER_IOCTL_CONTINUE:
case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
case SNDRV_TIMER_IOCTL_INFO32:
return snd_timer_user_info_compat(file, argp);
case SNDRV_TIMER_IOCTL_STATUS32:
return snd_timer_user_status_compat(file, argp);
}
return -ENOIOCTLCMD;
}
| gpl-2.0 |
DmitryADP/diff_qc750 | kernel/drivers/media/dvb/mantis/mantis_vp3028.c | 14092 | 1176 | /*
Mantis VP-3028 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mantis_common.h"
#include "mantis_vp3028.h"
struct zl10353_config mantis_vp3028_config = {
.demod_address = 0x0f,
};
#define MANTIS_MODEL_NAME "VP-3028"
#define MANTIS_DEV_TYPE "DVB-T"
struct mantis_hwconfig vp3028_mantis_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_188,
.baud_rate = MANTIS_BAUD_9600,
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
};
| gpl-2.0 |
mukulsoni/android_kernel_samsung_ms013g-G4SWA | drivers/media/dvb/mantis/mantis_vp3028.c | 14092 | 1176 | /*
Mantis VP-3028 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mantis_common.h"
#include "mantis_vp3028.h"
struct zl10353_config mantis_vp3028_config = {
.demod_address = 0x0f,
};
#define MANTIS_MODEL_NAME "VP-3028"
#define MANTIS_DEV_TYPE "DVB-T"
struct mantis_hwconfig vp3028_mantis_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_188,
.baud_rate = MANTIS_BAUD_9600,
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
};
| gpl-2.0 |
ZHAW-INES/rioxo-uClinux-dist | lib/libatm/src/arpd/table.c | 13 | 6478 | /* table.c - ATMARP table */
/* Written 1995-2000 by Werner Almesberger, EPFL-LRC/ICA */
#if HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/atm.h>
#include "atm.h"
#include "atmd.h"
#include "atmarpd.h"
#include "table.h"
#define COMPONENT "TABLE"
const char *entry_state_name[] = { "NONE","RESOLV","INVALID","VALID" };
ENTRY *alloc_entry(int svc)
{
ENTRY *entry;
entry = alloc_t(ENTRY);
entry->state = as_none;
entry->svc = svc;
entry->ip = 0;
entry->addr = NULL;
entry->flags = 0;
entry->timer = NULL;
entry->vccs = NULL;
entry->notify = NULL;
entry->itf = NULL;
return entry;
}
ENTRY *lookup_ip(const ITF *itf,uint32_t ip)
{
ENTRY *walk;
for (walk = itf->table; walk; walk = walk->next)
if (walk->ip == ip) break;
return walk;
}
ENTRY *lookup_addr(const ITF *itf,const struct sockaddr_atmsvc *addr)
{
ENTRY *walk;
for (walk = itf->table; walk; walk = walk->next)
if (walk->addr && atm_equal((struct sockaddr *) walk->addr,
(struct sockaddr *) addr,0,0)) break;
return walk;
}
ENTRY *lookup_incoming(const struct sockaddr_atmsvc *addr)
{
ENTRY *walk;
for (walk = unknown_incoming; walk; walk = walk->next)
if (walk->addr && atm_equal((struct sockaddr *) walk->addr,
(struct sockaddr *) addr,0,0)) break;
return walk;
}
static int table_uptodate = 0; /* ATMARP table file is up to date */
static FILE *out_file = NULL;
static int out_error = 0;
static void output(const char *fmt,...)
{
va_list ap;
va_start(ap,fmt);
if (!out_file) vdiag(COMPONENT,DIAG_DEBUG,fmt,ap);
else if (vfprintf(out_file,fmt,ap) < 0 || putc('\n',out_file) < 0)
out_error = errno;
va_end(ap);
}
static void dump_vcc(VCC *vcc)
{
struct sockaddr_atmsvc addr;
char addr_buf[MAX_ATM_ADDR_LEN+1];
char qos_buf[MAX_ATM_QOS_LEN+1];
struct atm_qos qos;
int size,sndbuf;
size = sizeof(addr);
if (getpeername(vcc->fd,(struct sockaddr *) &addr,&size) < 0) {
diag(COMPONENT,DIAG_ERROR,"getpeername: %s",strerror(errno));
strcpy(addr_buf,"<getpeername error>");
}
else {
#if 0
int i;
for (i = 0; i < size; i++)
printf("%02X ",((unsigned char *) &addr)[i]);
printf("\n");
#endif
if (atm2text(addr_buf,sizeof(addr_buf),(struct sockaddr *) &addr,
pretty) < 0) strcpy(addr_buf,"<atm2text error>");
}
output(" %s%s",addr_buf,vcc->connecting ? ", connecting" :
!vcc->entry || !vcc->entry->svc ? "" : vcc->active ? " (active)" :
" (passive)");
if (vcc->connecting) return;
size = sizeof(qos);
if (getsockopt(vcc->fd,SOL_ATM,SO_ATMQOS,&qos,&size) < 0)
output(" QOS: <unavailable: %s>",strerror(errno));
else if (!vcc->entry || !qos_equal(&vcc->entry->qos,&qos)) {
if (qos2text(qos_buf,sizeof(qos_buf),&qos,0) < 0)
strcpy(qos_buf,"<invalid qos>");
output(" QOS: %s",qos_buf);
}
size = sizeof(sndbuf);
if (getsockopt(vcc->fd,SOL_SOCKET,SO_SNDBUF,&sndbuf,&size) < 0)
output(" Send buffer: <unavailable: %s>",strerror(errno));
else if (!vcc->entry || vcc->entry->sndbuf != sndbuf)
output(" Send buffer: %d",sndbuf);
}
static void dump_vccs(VCC *vcc)
{
while (vcc) {
dump_vcc(vcc);
vcc = vcc->next;
}
}
static void dump_entries(ENTRY *list)
{
static const char *flag_name[] = {
"???", "com", "PERM", "PUBL", /* 0x0001-0x0008 */
"trailers", "netmask", "dontpub", "magic", /* 0x0010-0x0080 */
"???", "???", "???", "???", /* 0x0100-0x0800 */
"NULL", "ARPSRV", "NOVC", "???" }; /* 0x1000-0x8000 */
/* lower case flags are not used by ATMARP */
ENTRY *entry;
char addr_buf[MAX_ATM_ADDR_LEN+1];
char qos_buf[MAX_ATM_QOS_LEN+1];
char tmp[100]; /* large enough for all flags */
unsigned char *ipp;
int i;
for (entry = list; entry ; entry = entry->next) {
if (!entry->addr) strcpy(addr_buf,"<none>");
else if (atm2text(addr_buf,MAX_ATM_ADDR_LEN+1,
(struct sockaddr *) entry->addr,pretty) < 0)
strcpy(addr_buf,"<error>");
ipp = (unsigned char *) &entry->ip;
*tmp = 0;
for (i = 0; i < 16; i++)
if (entry->flags & (1 << i)) {
if (*tmp) strcat(tmp,",");
strcat(tmp,flag_name[i]);
}
output("IP %d.%d.%d.%d, state %s, addr %s, flags 0x%x<%s>",ipp[0],
ipp[1],ipp[2],ipp[3],entry_state_name[entry->state],addr_buf,
entry->flags,tmp);
if (entry->itf && !qos_equal(&entry->itf->qos,&entry->qos)) {
if (qos2text(qos_buf,sizeof(qos_buf),&entry->qos,0) < 0)
strcpy(qos_buf,"<error>");
output(" QOS: %s",qos_buf);
}
if (entry->itf && entry->sndbuf && entry->sndbuf != entry->itf->sndbuf)
output(" Send buffer: %d",entry->sndbuf);
if (entry->notify) {
NOTIFY *notify;
int count;
count = 0;
for (notify = entry->notify; notify; notify = notify->next) count++;
output(" %d quer%s pending",count,count == 1 ? "y" : "ies");
}
dump_vccs(entry->vccs);
}
}
static void dump_itf(ITF *itf)
{
unsigned char *ipp,*nmp;
char buf[MAX_ATM_QOS_LEN+1];
ipp = (unsigned char *) &itf->local_ip;
nmp = (unsigned char *) &itf->netmask;
output("----- Itf %d (%d.%d.%d.%d, netmask %d.%d.%d.%d) -----",itf->number,
ipp[0],ipp[1],ipp[2],ipp[3],nmp[0],nmp[1],nmp[2],nmp[3]);
if (qos2text(buf,sizeof(buf),&itf->qos,0) < 0) strcpy(buf,"<error>");
output("Default QOS: %s",buf);
if (itf->sndbuf) output("Default send buffer: %d",itf->sndbuf);
dump_entries(itf->table);
}
static void dump_all(void)
{
ITF *itf;
for (itf = itfs; itf; itf = itf->next) dump_itf(itf);
output("----- Unknown incoming connections -----");
dump_entries(unknown_incoming);
output("----- Incoming unidirectional connections -----");
dump_vccs(unidirectional_vccs);
output("----- End of dump -----");
}
void table_changed(void)
{
table_uptodate = 0;
table_update(); /* @@@ sigh, fix this later */
if (debug) {
out_file = 0;
dump_all();
}
}
int table_update(void)
{
if (table_uptodate) return 0;
out_file = fopen(ATMARP_TMP_DUMP_FILE,"w");
out_error = 0;
dump_all();
if (fclose(out_file) < 0) out_error = errno;
if (!out_error) {
if (rename(ATMARP_TMP_DUMP_FILE,ATMARP_DUMP_FILE) < 0)
out_error = errno;
else table_uptodate = 1;
}
unlink(ATMARP_TMP_DUMP_FILE);
return out_error;
}
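/*
 * Note on the idiom above: dumping into ATMARP_TMP_DUMP_FILE and then
 * rename()ing it over ATMARP_DUMP_FILE makes the update effectively atomic
 * for readers of the dump file - they either see the previous complete
 * table or the new one, never a half-written file.
 */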
| gpl-2.0 |
billyxue/google-mysql | vio/viossl.c | 13 | 11916 | /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/*
Note that we can't have assertions on file descriptors; the reason for
this is that during mysql shutdown, another thread can close a file
we are working on. In this case we should just return read errors from
the file descriptor.
*/
#include "vio_priv.h"
#include "my_context.h"
#include <mysql_async.h>
#ifdef HAVE_OPENSSL
#ifndef DBUG_OFF
static void
report_errors(SSL* ssl)
{
unsigned long l;
const char *file;
const char *data;
int line, flags;
char buf[512];
DBUG_ENTER("report_errors");
while ((l= ERR_get_error_line_data(&file,&line,&data,&flags)))
{
DBUG_PRINT("error", ("OpenSSL: %s:%s:%d:%s\n", ERR_error_string(l,buf),
file,line,(flags&ERR_TXT_STRING)?data:"")) ;
}
if (ssl)
{
#ifndef DBUG_OFF
int error= SSL_get_error(ssl, l);
DBUG_PRINT("error", ("error: %s (%d)",
ERR_error_string(error, buf), error));
#endif
}
DBUG_PRINT("info", ("socket_errno: %d", socket_errno));
DBUG_VOID_RETURN;
}
#endif
/**
Obtain the equivalent system error status for the last SSL I/O operation.
@param ssl_error The result code of the failed TLS/SSL I/O operation.
*/
static void ssl_set_sys_error(int ssl_error)
{
int error= 0;
switch (ssl_error)
{
case SSL_ERROR_ZERO_RETURN:
error= SOCKET_ECONNRESET;
break;
case SSL_ERROR_WANT_READ:
case SSL_ERROR_WANT_WRITE:
#ifdef SSL_ERROR_WANT_CONNECT
case SSL_ERROR_WANT_CONNECT:
#endif
#ifdef SSL_ERROR_WANT_ACCEPT
case SSL_ERROR_WANT_ACCEPT:
#endif
error= SOCKET_EWOULDBLOCK;
break;
case SSL_ERROR_SSL:
/* Protocol error. */
#ifdef EPROTO
error= EPROTO;
#else
error= SOCKET_ECONNRESET;
#endif
break;
case SSL_ERROR_SYSCALL:
case SSL_ERROR_NONE:
default:
break;
};
/* Set error status to a equivalent of the SSL error. */
if (error)
{
#ifdef _WIN32
WSASetLastError(error);
#else
errno= error;
#endif
}
}
/**
Indicate whether a SSL I/O operation must be retried later.
@param vio VIO object representing a SSL connection.
@param ret Value returned by a SSL I/O function.
@param event[out] The type of I/O event to wait/retry.
@return Whether a SSL I/O operation should be deferred.
@retval TRUE Temporary failure, retry operation.
@retval FALSE Indeterminate failure.
*/
static my_bool ssl_should_retry(Vio *vio, int ret, enum enum_vio_io_event *event)
{
int ssl_error;
SSL *ssl= vio->ssl_arg;
my_bool should_retry= TRUE;
/* Retrieve the result for the SSL I/O operation. */
ssl_error= SSL_get_error(ssl, ret);
/* Map the SSL error to the I/O event to wait for, if retryable. */
switch (ssl_error)
{
case SSL_ERROR_WANT_READ:
*event= VIO_IO_EVENT_READ;
break;
case SSL_ERROR_WANT_WRITE:
*event= VIO_IO_EVENT_WRITE;
break;
default:
#ifndef DBUG_OFF
report_errors(ssl);
#endif
should_retry= FALSE;
ssl_set_sys_error(ssl_error);
break;
}
return should_retry;
}
size_t vio_ssl_read(Vio *vio, uchar *buf, size_t size)
{
int ret;
SSL *ssl= vio->ssl_arg;
DBUG_ENTER("vio_ssl_read");
DBUG_PRINT("enter", ("sd: %d buf: %p size: %d ssl: %p",
mysql_socket_getfd(vio->mysql_socket), buf, (int) size,
vio->ssl_arg));
if (vio->async_context && vio->async_context->active)
ret= my_ssl_read_async(vio->async_context, (SSL *)vio->ssl_arg, buf, size);
else
{
while ((ret= SSL_read(ssl, buf, size)) < 0)
{
enum enum_vio_io_event event;
/* Process the SSL I/O error. */
if (!ssl_should_retry(vio, ret, &event))
break;
/* Attempt to wait for an I/O event. */
if (vio_socket_io_wait(vio, event))
break;
}
}
#ifndef DBUG_OFF
if (ret < 0)
report_errors((SSL*) vio->ssl_arg);
#endif
DBUG_PRINT("exit", ("%d", (int) ret));
DBUG_RETURN(ret < 0 ? -1 : ret);
}
size_t vio_ssl_write(Vio *vio, const uchar *buf, size_t size)
{
int ret;
SSL *ssl= vio->ssl_arg;
DBUG_ENTER("vio_ssl_write");
DBUG_PRINT("enter", ("sd: %d buf: %p size: %d",
mysql_socket_getfd(vio->mysql_socket),
buf, (int) size));
if (vio->async_context && vio->async_context->active)
ret= my_ssl_write_async(vio->async_context, (SSL *)vio->ssl_arg, buf,
size);
else
{
while ((ret= SSL_write(ssl, buf, size)) < 0)
{
enum enum_vio_io_event event;
/* Process the SSL I/O error. */
if (!ssl_should_retry(vio, ret, &event))
break;
/* Attempt to wait for an I/O event. */
if (vio_socket_io_wait(vio, event))
break;
}
}
#ifndef DBUG_OFF
if (ret < 0)
report_errors((SSL*) vio->ssl_arg);
#endif
DBUG_RETURN(ret < 0 ? -1 : ret);
}
#ifdef HAVE_YASSL
/* Emulate a blocking recv() call with vio_read(). */
static long yassl_recv(void *ptr, void *buf, size_t len,
int flag __attribute__((unused)))
{
return vio_read(ptr, buf, len);
}
/* Emulate a blocking send() call with vio_write(). */
static long yassl_send(void *ptr, const void *buf, size_t len,
int flag __attribute__((unused)))
{
return vio_write(ptr, buf, len);
}
#endif
int vio_ssl_close(Vio *vio)
{
int r= 0;
SSL *ssl= (SSL*)vio->ssl_arg;
DBUG_ENTER("vio_ssl_close");
if (ssl)
{
/*
The SSL standard says that SSL sockets must send and receive a close_notify
alert on socket shutdown to avoid truncation attacks. However, this can
cause problems since we often hold a lock during shutdown and this IO can
take an unbounded amount of time to complete. Since our packets are
self-describing with length, we aren't vulnerable to these attacks.
Therefore, we just shut down by closing the socket (quiet shutdown).
*/
SSL_set_quiet_shutdown(ssl, 1);
switch ((r= SSL_shutdown(ssl))) {
case 1:
/* Shutdown successful */
break;
case 0:
/*
Shutdown not yet finished - since the socket is going to
be closed there is no need to call SSL_shutdown() a second
time to wait for the other side to respond
*/
break;
default: /* Shutdown failed */
DBUG_PRINT("vio_error", ("SSL_shutdown() failed, error: %d",
SSL_get_error(ssl, r)));
break;
}
}
DBUG_RETURN(vio_close(vio));
}
void vio_ssl_delete(Vio *vio)
{
if (!vio)
return; /* It must be safe to delete null pointer */
if (vio->type == VIO_TYPE_SSL)
vio_ssl_close(vio); /* Still open, close connection first */
if (vio->ssl_arg)
{
SSL_free((SSL*) vio->ssl_arg);
vio->ssl_arg= 0;
}
vio_delete(vio);
}
/** SSL handshake handler. */
typedef int (*ssl_handshake_func_t)(SSL*);
/**
Loop and wait until a SSL handshake is completed.
@param vio VIO object representing a SSL connection.
@param ssl SSL structure for the connection.
@param func SSL handshake handler.
@return Return value is 1 on success.
*/
static int ssl_handshake_loop(Vio *vio, SSL *ssl, ssl_handshake_func_t func)
{
int ret;
vio->ssl_arg= ssl;
/* Initiate the SSL handshake. */
while ((ret= func(ssl)) < 1)
{
enum enum_vio_io_event event;
/* Process the SSL I/O error. */
if (!ssl_should_retry(vio, ret, &event))
break;
/* Wait for I/O so that the handshake can proceed. */
if (vio_socket_io_wait(vio, event))
break;
}
vio->ssl_arg= NULL;
return ret;
}
static int ssl_do(struct st_VioSSLFd *ptr, Vio *vio, long timeout,
ssl_handshake_func_t func, unsigned long *errptr)
{
int r;
SSL *ssl;
my_bool unused;
my_bool was_blocking;
my_socket sd= mysql_socket_getfd(vio->mysql_socket);
DBUG_ENTER("ssl_do");
DBUG_PRINT("enter", ("ptr: 0x%lx, sd: %d ctx: 0x%lx",
(long) ptr, sd, (long) ptr->ssl_context));
/* Set socket to blocking if not already set */
vio_blocking(vio, 1, &was_blocking);
if (!(ssl= SSL_new(ptr->ssl_context)))
{
DBUG_PRINT("error", ("SSL_new failure"));
*errptr= ERR_get_error();
vio_blocking(vio, was_blocking, &unused);
DBUG_RETURN(1);
}
DBUG_PRINT("info", ("ssl: 0x%lx timeout: %ld", (long) ssl, timeout));
SSL_clear(ssl);
SSL_SESSION_set_timeout(SSL_get_session(ssl), timeout);
SSL_set_fd(ssl, sd);
/*
Since yaSSL does not support non-blocking send operations, use
special transport functions that properly handles non-blocking
sockets. These functions emulate the behavior of blocking I/O
operations by waiting for I/O to become available.
*/
#ifdef HAVE_YASSL
/* Set first argument of the transport functions. */
yaSSL_transport_set_ptr(ssl, vio);
/* Set functions to use in order to send and receive data. */
yaSSL_transport_set_recv_function(ssl, yassl_recv);
yaSSL_transport_set_send_function(ssl, yassl_send);
#endif
#if !defined(HAVE_YASSL) && defined(SSL_OP_NO_COMPRESSION)
SSL_set_options(ssl, SSL_OP_NO_COMPRESSION);
#endif
if ((r= ssl_handshake_loop(vio, ssl, func)) < 1)
{
DBUG_PRINT("error", ("SSL_connect/accept failure"));
*errptr= SSL_get_error(ssl, r);
SSL_free(ssl);
vio_blocking(vio, was_blocking, &unused);
DBUG_RETURN(1);
}
/*
Connection succeeded. Install new function handlers,
change type, set sd to the fd used when connecting
and set pointer to the SSL structure
*/
if (vio_reset(vio, VIO_TYPE_SSL, SSL_get_fd(ssl), ssl, 0))
{
vio_blocking(vio, was_blocking, &unused);
DBUG_RETURN(1);
}
#ifndef DBUG_OFF
{
/* Print some info about the peer */
X509 *cert;
char buf[512];
DBUG_PRINT("info",("SSL connection succeeded"));
DBUG_PRINT("info",("Using cipher: '%s'" , SSL_get_cipher_name(ssl)));
if ((cert= SSL_get_peer_certificate (ssl)))
{
DBUG_PRINT("info",("Peer certificate:"));
X509_NAME_oneline(X509_get_subject_name(cert), buf, sizeof(buf));
DBUG_PRINT("info",("\t subject: '%s'", buf));
X509_NAME_oneline(X509_get_issuer_name(cert), buf, sizeof(buf));
DBUG_PRINT("info",("\t issuer: '%s'", buf));
X509_free(cert);
}
else
DBUG_PRINT("info",("Peer does not have certificate."));
if (SSL_get_shared_ciphers(ssl, buf, sizeof(buf)))
{
DBUG_PRINT("info",("shared_ciphers: '%s'", buf));
}
else
DBUG_PRINT("info",("no shared ciphers!"));
}
#endif
DBUG_RETURN(0);
}
int sslaccept(struct st_VioSSLFd *ptr, Vio *vio, long timeout, unsigned long *errptr)
{
DBUG_ENTER("sslaccept");
DBUG_RETURN(ssl_do(ptr, vio, timeout, SSL_accept, errptr));
}
int sslconnect(struct st_VioSSLFd *ptr, Vio *vio, long timeout, unsigned long *errptr)
{
DBUG_ENTER("sslconnect");
DBUG_RETURN(ssl_do(ptr, vio, timeout, SSL_connect, errptr));
}
int vio_ssl_blocking(Vio *vio __attribute__((unused)),
my_bool set_blocking_mode,
my_bool *old_mode)
{
/* Mode is always blocking */
*old_mode= 1;
/* Return error if we try to change to non_blocking mode */
return (set_blocking_mode ? 0 : 1);
}
my_bool vio_ssl_has_data(Vio *vio)
{
return SSL_pending(vio->ssl_arg) > 0 ? TRUE : FALSE;
}
#endif /* HAVE_OPENSSL */
| gpl-2.0 |
blahblahblahblah831/cfg-loader-mod | source/usbstorage.c | 13 | 12025 | /*-------------------------------------------------------------
usbstorage_starlet.c -- USB mass storage support, inside starlet
Copyright (C) 2009 Kwiirk
If this driver is linked before libogc, this will replace the original
usbstorage driver by svpe from libogc
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any
damages arising from the use of this software.
Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and
redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you
must not claim that you wrote the original software. If you use
this software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and
must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
-------------------------------------------------------------*/
#include <unistd.h>
#include <gccore.h>
#include <malloc.h>
#include <stdio.h>
#include <string.h>
#include "debug.h"
/* IOCTL commands */
#define UMS_BASE (('U'<<24)|('M'<<16)|('S'<<8))
#define USB_IOCTL_UMS_INIT (UMS_BASE+0x1)
#define USB_IOCTL_UMS_GET_CAPACITY (UMS_BASE+0x2)
#define USB_IOCTL_UMS_READ_SECTORS (UMS_BASE+0x3)
#define USB_IOCTL_UMS_WRITE_SECTORS (UMS_BASE+0x4)
#define USB_IOCTL_UMS_READ_STRESS (UMS_BASE+0x5)
#define USB_IOCTL_UMS_SET_VERBOSE (UMS_BASE+0x6)
#define WBFS_BASE (('W'<<24)|('F'<<16)|('S'<<8))
#define USB_IOCTL_WBFS_OPEN_DISC (WBFS_BASE+0x1)
#define USB_IOCTL_WBFS_READ_DISC (WBFS_BASE+0x2)
#define USB_IOCTL_WBFS_READ_DEBUG (WBFS_BASE+0x13)
#define USB_IOCTL_WBFS_SET_DEVICE (WBFS_BASE+0x14)
#define USB_IOCTL_WBFS_SET_FRAGLIST (WBFS_BASE+0x15)
#define UMS_HEAPSIZE 0x8000
#define USB_MEM2_SIZE 0x10000
// 0x10000 = 64 KB = 128 x 512 sector = 16 x 4k sector
/* Variables */
static char fs[] ATTRIBUTE_ALIGN(32) = "/dev/usb2";
static char fs2[] ATTRIBUTE_ALIGN(32) = "/dev/usb123";
static char fs3[] ATTRIBUTE_ALIGN(32) = "/dev/usb/ehc";
static s32 hid = -1, fd = -1;
static u32 sector_size;
static void *usb_buf2;
static mutex_t usb_mutex = LWP_MUTEX_NULL;
extern void* SYS_AllocArena2MemLo(u32 size,u32 align);
static inline s32 __USBStorage_isMEM2Buffer(const void *buffer)
{
// MEM1: 0x80000000 (cached) 0xC0000000 (uncached)
// MEM2: 0x90000000 (cached) 0xD0000000 (uncached)
return ((u32)buffer & 0x10000000) != 0;
// Why is this important? MEM1 seems to work just fine
// so let's skip this check
//return true;
}
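/* Worked check of the mask above: MEM2 pointers such as 0x90xxxxxx (cached)
 * or 0xD0xxxxxx (uncached) have bit 28 set, so (addr & 0x10000000) != 0,
 * while MEM1 pointers 0x80xxxxxx / 0xC0xxxxxx give 0. */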
u32 USBStorage_GetCapacity(u32 *_sector_size)
{
if (fd >= 0) {
s32 ret;
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_UMS_GET_CAPACITY, ":i", &sector_size);
/*
static int first = 1;
if (first) {
printf("\nSECTORS: %u\n", ret);
printf("SEC SIZE: %u\n", sector_size);
printf("HDD SIZE: %u GB [%u]\n", ret/1024/1024*sector_size/1024, sector_size);
Menu_PrintWait();
first = 0;
}
*/
//dbg_printf("capacity %d %d\n", sector_size, ret);
if (ret && _sector_size)
*_sector_size = sector_size;
return ret;
}
return 0;
}
s32 USBStorage_OpenDev()
{
/* Already open */
if (fd >= 0)
return fd;
/* Create heap */
if (hid < 0) {
hid = iosCreateHeap(UMS_HEAPSIZE);
if (hid < 0)
return IPC_ENOMEM; // = -22
}
// allocate buf2
if (usb_buf2 == NULL) {
usb_buf2 = SYS_AllocArena2MemLo(USB_MEM2_SIZE, 32);
if (usb_buf2 == NULL) {
printf("ERR: usb mem2\n");
sleep(3);
return IPC_ENOMEM; // = -22
}
}
/* Open USB device */
fd = IOS_Open(fs, 0);
dbg_printf("open(%s)=%d", fs, fd);
if (fd < 0) {
dbg_printf("\n");
fd = IOS_Open(fs2, 0);
dbg_printf("open(%s)=%d", fs2, fd);
}
if (fd < 0) {
dbg_printf("\n");
fd = IOS_Open(fs3, 0);
dbg_printf("open(%s)=%d", fs3, fd);
}
if (fd < 0) {
dbg_printf("\n");
}
LWP_MutexInit(&usb_mutex, false);
return fd;
}
s32 USBStorage_Init(void)
{
s32 ret;
u32 cap;
u32 sect_size;
get_time(&TIME.usb_init1);
USBStorage_OpenDev();
get_time(&TIME.usb_open);
if (fd < 0)
return fd;
/* Initialize USB storage */
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_UMS_INIT, ":");
dbg_printf(" init:%d", ret);
get_time(&TIME.usb_cap);
/* Get device capacity */
cap = USBStorage_GetCapacity(&sect_size);
dbg_printf(" cap:%u ss:%u\n", cap, sect_size);
get_time(&TIME.usb_init2);
if (cap < 10000)
goto err;
return 0;
err:
/* Close USB device */
if (fd >= 0) {
IOS_Close(fd);
fd = -1;
}
return -1;
}
void USBStorage_Deinit(void)
{
/* Close USB device */
if (fd >= 0) {
IOS_Close(fd);
fd = -1;
}
/*if (hid > 0) {
iosDestroyHeap(hid);
hid = -1;
}*/
LWP_MutexDestroy(usb_mutex);
usb_mutex = LWP_MUTEX_NULL;
}
s32 USBStorage_ReadSectors(u32 sector, u32 numSectors, void *buffer)
{
u32 size;
s32 ret = -1;
/* Device not opened */
if (fd < 0)
return fd;
/* check align and MEM1 buffer */
if (((u32)buffer & 0x1F) || (!__USBStorage_isMEM2Buffer(buffer))) {
if (!usb_buf2) return IPC_ENOMEM;
int cnt;
int max_sec = USB_MEM2_SIZE / sector_size;
//dbg_printf("usb_read(%u,%u) unaligned(%p)\n", sector, numSectors, buffer);
while (numSectors) {
if (numSectors > max_sec) cnt = max_sec; else cnt = numSectors;
size = cnt * sector_size;
LWP_MutexLock(usb_mutex);
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_UMS_READ_SECTORS,
"ii:d", sector, cnt, usb_buf2, size);
memcpy(buffer, usb_buf2, size);
LWP_MutexUnlock(usb_mutex);
//dbg_printf("usb_read_chunk(%u,%u)=%d\n", sector, cnt, ret);
if (ret < 0) return ret;
numSectors -= cnt;
sector += cnt;
buffer += size;
}
} else {
size = sector_size * numSectors;
/* Read data */
LWP_MutexLock(usb_mutex);
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_UMS_READ_SECTORS,
"ii:d", sector, numSectors, buffer, size);
LWP_MutexUnlock(usb_mutex);
}
//dbg_printf("read %u %u = %d\n", sector, numSectors, ret);
return ret;
}
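/* Chunking arithmetic for the unaligned path above, assuming the usual
 * 512-byte sectors: max_sec = USB_MEM2_SIZE / sector_size = 0x10000 / 512
 * = 128, so a large misaligned read is bounced through usb_buf2 in 64 KB
 * (128-sector) pieces; with 4 KB sectors the chunk drops to 16 sectors. */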
s32 USBStorage_WriteSectors(u32 sector, u32 numSectors, const void *buffer)
{
u32 size;
s32 ret = -1;
/* Device not opened */
if (fd < 0)
return fd;
/* check align and MEM1 buffer */
if (((u32)buffer & 0x1F) || (!__USBStorage_isMEM2Buffer(buffer))) {
if (!usb_buf2) return IPC_ENOMEM;
int cnt;
int max_sec = USB_MEM2_SIZE / sector_size;
//dbg_printf("usb_write(%u,%u) unaligned(%p)\n", sector, numSectors, buffer);
while (numSectors) {
if (numSectors > max_sec) cnt = max_sec; else cnt = numSectors;
size = cnt * sector_size;
LWP_MutexLock(usb_mutex);
memcpy(usb_buf2, buffer, size);
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_UMS_WRITE_SECTORS,
"ii:d", sector, cnt, usb_buf2, size);
LWP_MutexUnlock(usb_mutex);
//dbg_printf("usb_write_chunk(%u,%u)=%d\n", sector, cnt, ret);
if (ret < 0) return ret;
numSectors -= cnt;
sector += cnt;
buffer += size;
}
} else {
size = sector_size * numSectors;
/* Write data */
LWP_MutexLock(usb_mutex);
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_UMS_WRITE_SECTORS,
"ii:d", sector, numSectors, buffer, size);
LWP_MutexUnlock(usb_mutex);
}
return ret;
}
// DISC_INTERFACE methods
static bool __io_usb_IsInserted(void)
{
s32 ret;
u32 sec_size;
if (fd < 0) return false;
ret = USBStorage_GetCapacity(&sec_size);
if (ret == 0) return false;
if (sec_size < 512 || sec_size > 4096) return false;
// sector sizes other than 512 will hang/crash libfat/libntfs
// so we explicitly don't support it here
//if (sec_size != 512) return false;
return true;
}
static bool __io_usb_Startup(void)
{
if (USBStorage_Init() < 0) return false;
return __io_usb_IsInserted();
}
int usb_verbose = 0;
bool __io_usb_ReadSectors(u32 sector, u32 count, void *buffer)
{
s32 ret = USBStorage_ReadSectors(sector, count, buffer);
if (usb_verbose) {
printf("usb-r: %x [%d] = %d\n", sector, count, ret);
//sleep(1);
}
return ret >= 0;
// hermes and waninkoko up to rev20: success = 1
// rev21: success = 0
}
bool __io_usb_WriteSectors(u32 sector, u32 count, void *buffer)
{
/*if (!buffer || count>128) {
printf("USBWR %d %d %p \n", sector, count, buffer);
Wpad_WaitButtons();
}*/
s32 ret = USBStorage_WriteSectors(sector, count, buffer);
//printf("usb-w: %d %d %d\n", sector, count, ret); sleep(1);
return ret >= 0;
}
static bool __io_usb_ClearStatus(void)
{
return true;
}
static bool __io_usb_Shutdown(void)
{
// do nothing
return true;
}
static bool __io_usb_NOP(void)
{
// do nothing
return true;
}
const DISC_INTERFACE my_io_usbstorage = {
DEVICE_TYPE_WII_USB,
FEATURE_MEDIUM_CANREAD | FEATURE_MEDIUM_CANWRITE | FEATURE_WII_USB,
(FN_MEDIUM_STARTUP) &__io_usb_Startup,
(FN_MEDIUM_ISINSERTED) &__io_usb_IsInserted,
(FN_MEDIUM_READSECTORS) &__io_usb_ReadSectors,
(FN_MEDIUM_WRITESECTORS) &__io_usb_WriteSectors,
(FN_MEDIUM_CLEARSTATUS) &__io_usb_ClearStatus,
(FN_MEDIUM_SHUTDOWN) &__io_usb_Shutdown
};
// read-only
const DISC_INTERFACE my_io_usbstorage_ro = {
DEVICE_TYPE_WII_USB,
FEATURE_MEDIUM_CANREAD | FEATURE_WII_USB,
(FN_MEDIUM_STARTUP) &__io_usb_Startup,
(FN_MEDIUM_ISINSERTED) &__io_usb_IsInserted,
(FN_MEDIUM_READSECTORS) &__io_usb_ReadSectors,
(FN_MEDIUM_WRITESECTORS) &__io_usb_NOP, //&__io_usb_WriteSectors,
(FN_MEDIUM_CLEARSTATUS) &__io_usb_ClearStatus,
(FN_MEDIUM_SHUTDOWN) &__io_usb_Shutdown
};
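/*
 * Illustrative use (a sketch, not from this file): these DISC_INTERFACE
 * tables are what libfat/libntfs consume, e.g.
 *
 *	if (fatMountSimple("usb", &my_io_usbstorage)) {
 *		// "usb:/" is now reachable through stdio
 *	}
 *
 * The read-only variant above simply swaps the write hook for a no-op.
 */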
s32 USBStorage_WBFS_Open(char *buffer)
{
void *buf = (void *)buffer;
u32 len = 8;
s32 ret;
/* Device not opened */
if (fd < 0)
return fd;
/* MEM1 buffer */
if (!__USBStorage_isMEM2Buffer(buffer)) {
/* Allocate memory */
//buf = iosAlloc(hid, len);
buf = usb_buf2;
if (!buf)
return IPC_ENOMEM;
memcpy(buf, buffer, len);
}
extern u32 wbfs_part_lba;
u32 part = wbfs_part_lba;
/* Read data */
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_WBFS_OPEN_DISC, "dd:", buf, len, &part, 4);
return ret;
}
#if 0
// woffset is in 32bit words, len is in bytes
s32 USBStorage_WBFS_Read(u32 woffset, u32 len, void *buffer)
{
void *buf = (void *)buffer;
s32 ret;
USBStorage_OpenDev();
/* Device not opened */
if (fd < 0)
return fd;
/* MEM1 buffer */
if (!__USBStorage_isMEM2Buffer(buffer)) {
/* Allocate memory */
//buf = iosAlloc(hid, len);
buf = usb_buf2;
if (!buf)
return IPC_ENOMEM;
}
*(char*)buf = 0;
/* Read data */
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_WBFS_READ_DISC, "ii:d", woffset, len, buf, len);
/* Copy data */
if (buf != buffer) {
memcpy(buffer, buf, len);
//iosFree(hid, buf);
}
return ret;
}
s32 USBStorage_WBFS_ReadDebug(u32 off, u32 size, void *buffer)
{
void *buf = (void *)buffer;
s32 ret;
USBStorage_OpenDev();
/* Device not opened */
if (fd < 0)
return fd;
/* MEM1 buffer */
if (!__USBStorage_isMEM2Buffer(buffer)) {
/* Allocate memory */
//buf = iosAlloc(hid, len);
buf = usb_buf2;
if (!buf)
return IPC_ENOMEM;
}
/* Read data */
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_WBFS_READ_DEBUG, "ii:d", off, size, buf, size);
/* Copy data */
if (buf != buffer) {
memcpy(buffer, buf, size);
//iosFree(hid, buf);
}
return ret;
}
s32 USBStorage_WBFS_SetDevice(int dev)
{
s32 ret;
static s32 retval = 0;
retval = 0;
USBStorage_OpenDev();
// Device not opened
if (fd < 0) return fd;
// ioctl
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_WBFS_SET_DEVICE, "i:i", dev, &retval);
if (retval) return retval;
return ret;
}
s32 USBStorage_WBFS_SetFragList(void *p, int size)
{
s32 ret;
USBStorage_OpenDev();
// Device not opened
if (fd < 0) return fd;
// ioctl
DCFlushRange(p, size);
ret = IOS_IoctlvFormat(hid, fd, USB_IOCTL_WBFS_SET_FRAGLIST, "d:", p, size);
return ret;
}
void usb_debug_dump(int arg)
{
//return;
char buf[2048]="";
//printf("\nehc fd: %d\n", fd);
int r = USBStorage_WBFS_ReadDebug(arg, sizeof(buf), buf);
printf("\n: %d %.2000s\n", r, buf);
}
#endif
| gpl-2.0 |
w5860363/wownemesis | src/server/scripts/Northrend/DraktharonKeep/boss_dred.cpp | 13 | 8244 | /*
* Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Comment: the "Raptor Call" handling may still need improvement.
*/
#include "ScriptPCH.h"
#include "drak_tharon_keep.h"
enum eSpells
{
SPELL_BELLOWING_ROAR = 22686, // fears the group, can be resisted/dispelled
SPELL_GRIEVOUS_BITE = 48920,
SPELL_MANGLING_SLASH = 48873, // cast on the current tank, applies a debuff
SPELL_FEARSOME_ROAR = 48849,
H_SPELL_FEARSOME_ROAR = 59422, //Not stacking, debuff
SPELL_PIERCING_SLASH = 48878, //debuff -->Armor reduced by 75%
SPELL_RAPTOR_CALL = 59416, //dummy
SPELL_GUT_RIP = 49710,
SPELL_REND = 13738
};
enum eAchievements
{
ACHIEV_BETTER_OFF_DRED = 2039
};
enum Creatures
{
NPC_RAPTOR_1 = 26641,
NPC_RAPTOR_2 = 26628
};
class boss_dred : public CreatureScript
{
public:
boss_dred() : CreatureScript("boss_dred") { }
struct boss_dredAI : public ScriptedAI
{
boss_dredAI(Creature *c) : ScriptedAI(c)
{
pInstance = c->GetInstanceScript();
}
uint32 uiBellowingRoarTimer;
uint32 uiGrievousBiteTimer;
uint32 uiManglingSlashTimer;
uint32 uiFearsomeRoarTimer;
uint32 uiPiercingSlashTimer;
uint32 uiRaptorCallTimer;
InstanceScript* pInstance;
void Reset()
{
if (pInstance)
{
pInstance->SetData(DATA_DRED_EVENT,NOT_STARTED);
pInstance->SetData(DATA_KING_DRED_ACHIEV, 0);
}
uiBellowingRoarTimer = 33*IN_MILLISECONDS;
uiGrievousBiteTimer = 20*IN_MILLISECONDS;
uiManglingSlashTimer = 18500;
uiFearsomeRoarTimer = urand(10*IN_MILLISECONDS,20*IN_MILLISECONDS);
uiPiercingSlashTimer = 17*IN_MILLISECONDS;
uiRaptorCallTimer = urand(20*IN_MILLISECONDS,25*IN_MILLISECONDS);
}
void EnterCombat(Unit* /*who*/)
{
if (pInstance)
pInstance->SetData(DATA_DRED_EVENT,IN_PROGRESS);
}
void UpdateAI(const uint32 diff)
{
//Return since we have no target
if (!UpdateVictim())
return;
if (uiBellowingRoarTimer < diff)
{
DoCastAOE(SPELL_BELLOWING_ROAR, false);
uiBellowingRoarTimer = 40*IN_MILLISECONDS;
} else uiBellowingRoarTimer -=diff;
if (uiGrievousBiteTimer < diff)
{
DoCastVictim(SPELL_GRIEVOUS_BITE ,false);
uiGrievousBiteTimer = 20*IN_MILLISECONDS;
} else uiGrievousBiteTimer -=diff;
if (uiManglingSlashTimer < diff)
{
DoCastVictim(SPELL_MANGLING_SLASH,false);
uiManglingSlashTimer = 20*IN_MILLISECONDS;
} else uiManglingSlashTimer -=diff;
if (uiFearsomeRoarTimer < diff)
{
DoCastAOE(SPELL_FEARSOME_ROAR,false);
uiFearsomeRoarTimer = urand(16*IN_MILLISECONDS,18*IN_MILLISECONDS);
} else uiFearsomeRoarTimer -=diff;
if (uiPiercingSlashTimer < diff)
{
DoCastVictim(SPELL_PIERCING_SLASH,false);
uiPiercingSlashTimer = 20*IN_MILLISECONDS;
} else uiPiercingSlashTimer -=diff;
if (uiRaptorCallTimer < diff)
{
DoCastVictim(SPELL_RAPTOR_CALL,false);
float x,y,z;
me->GetClosePoint(x,y,z,me->GetObjectSize()/3,10.0f);
me->SummonCreature(RAND(NPC_RAPTOR_1,NPC_RAPTOR_2),x,y,z,0,TEMPSUMMON_DEAD_DESPAWN,1*IN_MILLISECONDS);
uiRaptorCallTimer = urand(20*IN_MILLISECONDS,25*IN_MILLISECONDS);
} else uiRaptorCallTimer -=diff;
DoMeleeAttackIfReady();
}
void JustDied(Unit* /*killer*/)
{
if (pInstance)
{
pInstance->SetData(DATA_DRED_EVENT,DONE);
if (IsHeroic() && pInstance->GetData(DATA_KING_DRED_ACHIEV) == 6)
pInstance->DoCompleteAchievement(ACHIEV_BETTER_OFF_DRED);
}
}
};
CreatureAI *GetAI(Creature *creature) const
{
return new boss_dredAI(creature);
}
};
class npc_drakkari_gutripper : public CreatureScript
{
public:
npc_drakkari_gutripper() : CreatureScript("npc_drakkari_gutripper") { }
struct npc_drakkari_gutripperAI : public ScriptedAI
{
npc_drakkari_gutripperAI(Creature *c) : ScriptedAI(c)
{
pInstance = c->GetInstanceScript();
}
InstanceScript* pInstance;
uint32 GutRipTimer;
void Reset()
{
GutRipTimer = urand(10000,15000);
}
void UpdateAI(const uint32 diff)
{
//Return since we have no target
if (!UpdateVictim())
return;
if (GutRipTimer < diff)
{
DoCastVictim(SPELL_GUT_RIP,false);
GutRipTimer = urand(10000,15000);
}else GutRipTimer -=diff;
DoMeleeAttackIfReady();
}
void JustDied(Unit* /*killer*/)
{
if (pInstance)
{
if (IsHeroic() && pInstance->GetData(DATA_DRED_EVENT) == IN_PROGRESS && pInstance->GetData(DATA_KING_DRED_ACHIEV) < 6)
{
pInstance->SetData(DATA_KING_DRED_ACHIEV, pInstance->GetData(DATA_KING_DRED_ACHIEV) + 1);
}
}
}
};
CreatureAI *GetAI(Creature *creature) const
{
return new npc_drakkari_gutripperAI(creature);
}
};
class npc_drakkari_scytheclaw : public CreatureScript
{
public:
npc_drakkari_scytheclaw() : CreatureScript("npc_drakkari_scytheclaw") { }
struct npc_drakkari_scytheclawAI : public ScriptedAI
{
npc_drakkari_scytheclawAI(Creature *c) : ScriptedAI(c)
{
pInstance = c->GetInstanceScript();
}
InstanceScript* pInstance;
uint32 uiRendTimer;
void Reset()
{
uiRendTimer = urand(10*IN_MILLISECONDS,15*IN_MILLISECONDS);
}
void UpdateAI(const uint32 diff)
{
//Return since we have no target
if (!UpdateVictim())
return;
if (uiRendTimer < diff)
{
DoCastVictim(SPELL_REND,false);
uiRendTimer = urand(10*IN_MILLISECONDS,15*IN_MILLISECONDS);
}else uiRendTimer -=diff;
DoMeleeAttackIfReady();
}
void JustDied(Unit* /*killer*/)
{
if (pInstance)
{
if (IsHeroic() && pInstance->GetData(DATA_DRED_EVENT) == IN_PROGRESS && pInstance->GetData(DATA_KING_DRED_ACHIEV) < 6)
{
pInstance->SetData(DATA_KING_DRED_ACHIEV, pInstance->GetData(DATA_KING_DRED_ACHIEV) + 1);
}
}
}
};
CreatureAI *GetAI(Creature *creature) const
{
return new npc_drakkari_scytheclawAI(creature);
}
};
void AddSC_boss_dred()
{
new npc_drakkari_gutripper;
new npc_drakkari_scytheclaw;
new boss_dred;
}
| gpl-2.0 |
dalingrin/Nook_Color_Kernel_Overclock | arch/arm/plat-omap/devices.c | 13 | 10591 | /*
* linux/arch/arm/plat-omap/devices.c
*
* Common platform device setup/initialization for OMAP1 and OMAP2
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/i2c/menelaus.h>
#include <linux/bootmem.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <mach/tc.h>
#include <mach/board.h>
#include <mach/mmc.h>
#include <mach/mux.h>
#include <mach/gpio.h>
#include <mach/dsp_common.h>
#include <mach/mcbsp.h>
#if defined(CONFIG_OMAP_DSP) || defined(CONFIG_OMAP_DSP_MODULE)
static struct dsp_platform_data dsp_pdata = {
.kdev_list = LIST_HEAD_INIT(dsp_pdata.kdev_list),
};
static struct resource omap_dsp_resources[] = {
{
.name = "dsp_mmu",
.start = -1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device omap_dsp_device = {
.name = "dsp",
.id = -1,
.num_resources = ARRAY_SIZE(omap_dsp_resources),
.resource = omap_dsp_resources,
.dev = {
.platform_data = &dsp_pdata,
},
};
static inline void omap_init_dsp(void)
{
struct resource *res;
int irq;
if (cpu_is_omap15xx())
irq = INT_1510_DSP_MMU;
else if (cpu_is_omap16xx())
irq = INT_1610_DSP_MMU;
else if (cpu_is_omap24xx())
irq = INT_24XX_DSP_MMU;
res = platform_get_resource_byname(&omap_dsp_device,
IORESOURCE_IRQ, "dsp_mmu");
res->start = irq;
platform_device_register(&omap_dsp_device);
}
int dsp_kfunc_device_register(struct dsp_kfunc_device *kdev)
{
static DEFINE_MUTEX(dsp_pdata_lock);
spin_lock_init(&kdev->lock);
mutex_lock(&dsp_pdata_lock);
list_add_tail(&kdev->entry, &dsp_pdata.kdev_list);
mutex_unlock(&dsp_pdata_lock);
return 0;
}
EXPORT_SYMBOL(dsp_kfunc_device_register);
#else
static inline void omap_init_dsp(void) { }
#endif /* CONFIG_OMAP_DSP */
#if defined(CONFIG_MPU_BRIDGE) || defined(CONFIG_MPU_BRIDGE_MODULE)
static unsigned long dspbridge_phys_mempool_base;
void dspbridge_reserve_sdram(void)
{
void *va;
unsigned long size = CONFIG_BRIDGE_MEMPOOL_SIZE;
if (!size)
return;
va = __alloc_bootmem_nopanic(size, SZ_1M, 0);
if (!va) {
pr_err("%s: Failed to bootmem allocation(%lu bytes)\n",
__func__, size);
return;
}
dspbridge_phys_mempool_base = virt_to_phys(va);
}
unsigned long dspbridge_get_mempool_base(void)
{
return dspbridge_phys_mempool_base;
}
EXPORT_SYMBOL(dspbridge_get_mempool_base);
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_KEYBOARD_OMAP) || defined(CONFIG_KEYBOARD_OMAP_MODULE)
static void omap_init_kp(void)
{
/* 2430 and 34xx keypad is on TWL4030 */
if (cpu_is_omap2430() || cpu_is_omap34xx())
return;
if (machine_is_omap_h2() || machine_is_omap_h3()) {
omap_cfg_reg(F18_1610_KBC0);
omap_cfg_reg(D20_1610_KBC1);
omap_cfg_reg(D19_1610_KBC2);
omap_cfg_reg(E18_1610_KBC3);
omap_cfg_reg(C21_1610_KBC4);
omap_cfg_reg(G18_1610_KBR0);
omap_cfg_reg(F19_1610_KBR1);
omap_cfg_reg(H14_1610_KBR2);
omap_cfg_reg(E20_1610_KBR3);
omap_cfg_reg(E19_1610_KBR4);
omap_cfg_reg(N19_1610_KBR5);
} else if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
omap_cfg_reg(E2_730_KBR0);
omap_cfg_reg(J7_730_KBR1);
omap_cfg_reg(E1_730_KBR2);
omap_cfg_reg(F3_730_KBR3);
omap_cfg_reg(D2_730_KBR4);
omap_cfg_reg(C2_730_KBC0);
omap_cfg_reg(D3_730_KBC1);
omap_cfg_reg(E4_730_KBC2);
omap_cfg_reg(F4_730_KBC3);
omap_cfg_reg(E3_730_KBC4);
} else if (machine_is_omap_h4()) {
omap_cfg_reg(T19_24XX_KBR0);
omap_cfg_reg(R19_24XX_KBR1);
omap_cfg_reg(V18_24XX_KBR2);
omap_cfg_reg(M21_24XX_KBR3);
omap_cfg_reg(E5__24XX_KBR4);
if (omap_has_menelaus()) {
omap_cfg_reg(B3__24XX_KBR5);
omap_cfg_reg(AA4_24XX_KBC2);
omap_cfg_reg(B13_24XX_KBC6);
} else {
omap_cfg_reg(M18_24XX_KBR5);
omap_cfg_reg(H19_24XX_KBC2);
omap_cfg_reg(N19_24XX_KBC6);
}
omap_cfg_reg(R20_24XX_KBC0);
omap_cfg_reg(M14_24XX_KBC1);
omap_cfg_reg(V17_24XX_KBC3);
omap_cfg_reg(P21_24XX_KBC4);
omap_cfg_reg(L14_24XX_KBC5);
}
}
#else
static inline void omap_init_kp(void) {}
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OMAP_MCBSP) || defined(CONFIG_OMAP_MCBSP_MODULE)
static struct platform_device **omap_mcbsp_devices;
void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
int size)
{
int i;
omap_mcbsp_devices = kzalloc(size * sizeof(struct platform_device *),
GFP_KERNEL);
if (!omap_mcbsp_devices) {
printk(KERN_ERR "Could not register McBSP devices\n");
return;
}
for (i = 0; i < size; i++) {
struct platform_device *new_mcbsp;
int ret;
new_mcbsp = platform_device_alloc("omap-mcbsp", i + 1);
if (!new_mcbsp)
continue;
new_mcbsp->dev.platform_data = &config[i];
ret = platform_device_add(new_mcbsp);
if (ret) {
platform_device_put(new_mcbsp);
continue;
}
omap_mcbsp_devices[i] = new_mcbsp;
}
}
#else
void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
int size)
{ }
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
#define OMAP_MMC_NR_RES 2
/*
* Register MMC devices. Called from mach-omap1 and mach-omap2 device init.
*/
int __init omap_mmc_add(const char *name, int id, unsigned long base,
unsigned long size, unsigned int irq,
struct omap_mmc_platform_data *data)
{
struct platform_device *pdev;
struct resource res[OMAP_MMC_NR_RES];
int ret;
pdev = platform_device_alloc(name, id);
if (!pdev)
return -ENOMEM;
memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
res[0].start = base;
res[0].end = base + size - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = res[1].end = irq;
res[1].flags = IORESOURCE_IRQ;
ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
if (ret == 0)
ret = platform_device_add_data(pdev, data, sizeof(*data));
if (ret)
goto fail;
ret = platform_device_add(pdev);
if (ret)
goto fail;
/* return device handle to board setup code */
data->dev = &pdev->dev;
return 0;
fail:
platform_device_put(pdev);
return ret;
}
#endif
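/*
 * Illustrative board-code usage of omap_mmc_add() (a sketch only; the base
 * address, IRQ and platform-data values below are hypothetical - the real
 * callers live under mach-omap1/ and mach-omap2/):
 *
 *	static struct omap_mmc_platform_data board_mmc_data = {
 *		.nr_slots = 1,
 *	};
 *
 *	omap_mmc_add("mmci-omap", 0, BOARD_MMC_BASE, SZ_4K,
 *		     BOARD_MMC_IRQ, &board_mmc_data);
 */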
/*-------------------------------------------------------------------------*/
/* Numbering for the SPI-capable controllers when used for SPI:
* spi = 1
* uwire = 2
* mmc1..2 = 3..4
* mcbsp1..3 = 5..7
*/
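/*
 * Illustrative sketch (not from this file): board code hanging a device off
 * the uWire controller would use bus number 2 from the numbering above;
 * the device name and clock rate here are hypothetical:
 *
 *	static struct spi_board_info board_spi_info[] __initdata = {
 *		{
 *			.modalias	= "some-uwire-device",
 *			.bus_num	= 2,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 500000,
 *		},
 *	};
 *	spi_register_board_info(board_spi_info, ARRAY_SIZE(board_spi_info));
 */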
#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
#define OMAP_UWIRE_BASE 0xfffb3000
static struct resource uwire_resources[] = {
{
.start = OMAP_UWIRE_BASE,
.end = OMAP_UWIRE_BASE + 0x20,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap_uwire_device = {
.name = "omap_uwire",
.id = -1,
.num_resources = ARRAY_SIZE(uwire_resources),
.resource = uwire_resources,
};
static void omap_init_uwire(void)
{
/* FIXME define and use a boot tag; not all boards will be hooking
* up devices to the microwire controller, and multi-board configs
* mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
*/
/* board-specific code must configure chipselects (only a few
* are normally used) and SCLK/SDI/SDO (each has two choices).
*/
(void) platform_device_register(&omap_uwire_device);
}
#else
static inline void omap_init_uwire(void) {}
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
static struct resource wdt_resources[] = {
{
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap_wdt_device = {
.name = "omap_wdt",
.id = -1,
.num_resources = ARRAY_SIZE(wdt_resources),
.resource = wdt_resources,
};
static void omap_init_wdt(void)
{
if (cpu_is_omap16xx())
wdt_resources[0].start = 0xfffeb000;
else if (cpu_is_omap2420())
wdt_resources[0].start = 0x48022000; /* WDT2 */
else if (cpu_is_omap2430())
wdt_resources[0].start = 0x49016000; /* WDT2 */
else if (cpu_is_omap34xx())
wdt_resources[0].start = 0x48314000; /* WDT2 */
else
return;
wdt_resources[0].end = wdt_resources[0].start + 0x4f;
(void) platform_device_register(&omap_wdt_device);
}
#else
static inline void omap_init_wdt(void) {}
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
#ifdef CONFIG_ARCH_OMAP24XX
#define OMAP_RNG_BASE 0x480A0000
#else
#define OMAP_RNG_BASE 0xfffe5000
#endif
static struct resource rng_resources[] = {
{
.start = OMAP_RNG_BASE,
.end = OMAP_RNG_BASE + 0x4f,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap_rng_device = {
.name = "omap_rng",
.id = -1,
.num_resources = ARRAY_SIZE(rng_resources),
.resource = rng_resources,
};
static void omap_init_rng(void)
{
(void) platform_device_register(&omap_rng_device);
}
#else
static inline void omap_init_rng(void) {}
#endif
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
*
* (a) Does any "standard config" pin muxing needed. Board-specific
* code will have muxed GPIO pins and done "nonstandard" setup;
* that code could live in the boot loader.
* (b) Populating board-specific platform_data with the data drivers
* rely on to handle wiring variations.
* (c) Creating platform devices as meaningful on this board and
* with this kernel configuration.
*
* Claiming GPIOs, and setting their direction and initial values, is the
* responsibility of the device drivers. So is responding to probe().
*
* Board-specific knowledge like creating devices or pin setup is to be
* kept out of drivers as much as possible. In particular, pin setup
* may be handled by the boot loader, and drivers should expect it will
* normally have been done by the time they're probed.
*/
static int __init omap_init_devices(void)
{
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
omap_init_dsp();
omap_init_kp();
omap_init_uwire();
omap_init_wdt();
omap_init_rng();
return 0;
}
arch_initcall(omap_init_devices);
| gpl-2.0 |
rneugeba/linux-stable | arch/alpha/kernel/sys_cabriolet.c | 269 | 12453 | // SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/alpha/kernel/sys_cabriolet.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
* Code supporting the Cabriolet (AlphaPC64), EB66+, and EB164,
* PC164 and LX164.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/core_lca.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "pc873xx.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned long cached_irq_mask = ~0UL;
static inline void
cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
{
int ofs = (irq - 16) / 8;
outb(mask >> (16 + ofs * 8), 0x804 + ofs);
}
static inline void
cabriolet_enable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
}
static void
cabriolet_disable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
}
static struct irq_chip cabriolet_irq_type = {
.name = "CABRIOLET",
.irq_unmask = cabriolet_enable_irq,
.irq_mask = cabriolet_disable_irq,
.irq_mask_ack = cabriolet_disable_irq,
};
static void
cabriolet_device_interrupt(unsigned long v)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary registers */
pld = inb(0x804) | (inb(0x805) << 8) | (inb(0x806) << 16);
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 4) {
isa_device_interrupt(v);
} else {
handle_irq(16 + i);
}
}
}
static void __init
common_init_irq(void (*srm_dev_int)(unsigned long v))
{
init_i8259a_irqs();
if (alpha_using_srm) {
alpha_mv.device_interrupt = srm_dev_int;
init_srm_irqs(35, 0);
}
else {
long i;
outb(0xff, 0x804);
outb(0xff, 0x805);
outb(0xff, 0x806);
for (i = 16; i < 35; ++i) {
irq_set_chip_and_handler(i, &cabriolet_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
common_init_isa_dma();
setup_irq(16+4, &isa_cascade_irqaction);
}
#ifndef CONFIG_ALPHA_PC164
static void __init
cabriolet_init_irq(void)
{
common_init_irq(srm_device_interrupt);
}
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
/* In theory, the PC164 has the same interrupt hardware as the other
Cabriolet based systems. However, something got screwed up late
in the development cycle which broke the interrupt masking hardware.
Repeat, it is not possible to mask and ack interrupts. At all.
In an attempt to work around this, while processing interrupts,
we do not allow the IPL to drop below what it is currently. This
prevents the possibility of recursion.
??? Another option might be to force all PCI devices to use edge
triggered rather than level triggered interrupts. That might be
too invasive though. */
static void
pc164_srm_device_interrupt(unsigned long v)
{
__min_ipl = getipl();
srm_device_interrupt(v);
__min_ipl = 0;
}
static void
pc164_device_interrupt(unsigned long v)
{
__min_ipl = getipl();
cabriolet_device_interrupt(v);
__min_ipl = 0;
}
static void __init
pc164_init_irq(void)
{
common_init_irq(pc164_srm_device_interrupt);
}
#endif
/*
* The EB66+ is very similar to the EB66 except that it does not have
* the on-board NCR and Tulip chips. In the code below, I have used
* slot number to refer to the id select line and *not* the slot
* number used in the EB66+ documentation. However, in the table,
* I've given the slot number, the id select line and the Jxx number
* that's printed on the board. The interrupt pins from the PCI slots
* are wired into 3 interrupt summary registers at 0x804, 0x805 and
* 0x806 ISA.
*
* In the table, -1 means don't assign an IRQ number. This is usually
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
static inline int
eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
{16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
{16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 9, slot 2, J27 */
{16+3, 16+3, 16+8, 16+12, 16+6} /* IdSel 10, slot 3, J28 */
};
const long min_idsel = 6, max_idsel = 10, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
/*
* The AlphaPC64 is very similar to the EB66+ except that its slots
* are numbered differently. In the code below, I have used slot
* number to refer to the id select line and *not* the slot number
* used in the AlphaPC64 documentation. However, in the table, I've
* given the slot number, the id select line and the Jxx number that's
* printed on the board. The interrupt pins from the PCI slots are
* wired into 3 interrupt summary registers at 0x804, 0x805 and 0x806
* ISA.
*
* In the table, -1 means don't assign an IRQ number. This is usually
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
static inline int
cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
{ 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
{ 16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J20 */
{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
{ 16+3, 16+3, 16+8, 16+12, 16+16} /* IdSel 9, slot 3, J22 */
};
const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static inline void __init
cabriolet_enable_ide(void)
{
if (pc873xx_probe() == -1) {
printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
} else {
printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
pc873xx_get_model(), pc873xx_get_base());
pc873xx_enable_ide();
}
}
static inline void __init
cabriolet_init_pci(void)
{
common_init_pci();
cabriolet_enable_ide();
}
static inline void __init
cia_cab_init_pci(void)
{
cia_init_pci();
cabriolet_enable_ide();
}
/*
* The PC164 and LX164 have 19 PCI interrupts, four from each of the four
* PCI slots, the SIO, PCI/IDE, and USB.
*
* Each of the interrupts can be individually masked. This is
* accomplished by setting the appropriate bit in the mask register.
* A bit is set by writing a "1" to the desired position in the mask
* register and cleared by writing a "0". There are 3 mask registers
* located at ISA address 804h, 805h and 806h.
*
* An I/O read at ISA address 804h, 805h, 806h will return the
* state of the 11 PCI interrupts and not the state of the MASKED
* interrupts.
*
* Note: a write to I/O 804h, 805h, or 806h updates the corresponding
* mask register.
*
*
* ISA DATA<7:0>
* ISA +--------------------------------------------------------------+
* ADDRESS | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* +==============================================================+
* 0x804 | INTB0 | USB | IDE | SIO | INTA3 |INTA2 | INTA1 | INTA0 |
* +--------------------------------------------------------------+
* 0x805 | INTD0 | INTC3 | INTC2 | INTC1 | INTC0 |INTB3 | INTB2 | INTB1 |
* +--------------------------------------------------------------+
* 0x806 | Rsrv | Rsrv | Rsrv | Rsrv | Rsrv |INTD3 | INTD2 | INTD1 |
* +--------------------------------------------------------------+
* * Rsrv = reserved bits
* Note: The mask register is write-only.
*
* IdSel
* 5 32 bit PCI option slot 2
* 6 64 bit PCI option slot 0
* 7 64 bit PCI option slot 1
* 8 Saturn I/O
* 9 32 bit PCI option slot 3
* 10 USB
* 11 IDE
*
*/
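/*
 * Worked example of the mapping above (illustrative only): for irq 25,
 * cabriolet_update_irq_hw() computes ofs = (25 - 16) / 8 = 1, so the byte
 * written to 0x804 + 1 = 0x805 is cached_irq_mask >> (16 + 8), and bit
 * 25 - 24 = 1 of that byte carries the (write-only) mask state for this
 * interrupt line.
 */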
static inline int
alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
{ 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
{ 16+1, 16+1, 16+8, 16+12, 16+16}, /* IdSel 7, slot 1, J26 */
{ -1, -1, -1, -1, -1}, /* IdSel 8, SIO */
{ 16+3, 16+3, 16+10, 16+14, 16+18}, /* IdSel 9, slot 3, J19 */
{ 16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 10, USB */
{ 16+5, 16+5, 16+5, 16+5, 16+5} /* IdSel 11, IDE */
};
const long min_idsel = 5, max_idsel = 11, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
static inline void __init
alphapc164_init_pci(void)
{
cia_init_pci();
SMC93x_Init();
}
/*
* The System Vector
*/
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
struct alpha_machine_vector cabriolet_mv __initmv = {
.vector_name = "Cabriolet",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
.machine_check = apecs_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = apecs_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cabriolet_init_pci,
.pci_map_irq = cabriolet_map_irq,
.pci_swizzle = common_swizzle,
};
#ifndef CONFIG_ALPHA_EB64P
ALIAS_MV(cabriolet)
#endif
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164)
struct alpha_machine_vector eb164_mv __initmv = {
.vector_name = "EB164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cia_cab_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = cabriolet_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb164)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P)
struct alpha_machine_vector eb66p_mv __initmv = {
.vector_name = "EB66+",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_LCA_IO,
.machine_check = lca_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = lca_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = cabriolet_init_pci,
.pci_map_irq = eb66p_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(eb66p)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164)
struct alpha_machine_vector lx164_mv __initmv = {
.vector_name = "LX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = PYXIS_DAC_OFFSET,
.nr_irqs = 35,
.device_interrupt = cabriolet_device_interrupt,
.init_arch = pyxis_init_arch,
.init_irq = cabriolet_init_irq,
.init_rtc = common_init_rtc,
.init_pci = alphapc164_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = alphapc164_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(lx164)
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
struct alpha_machine_vector pc164_mv __initmv = {
.vector_name = "PC164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_CIA_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = CIA_DEFAULT_MEM_BASE,
.nr_irqs = 35,
.device_interrupt = pc164_device_interrupt,
.init_arch = cia_init_arch,
.init_irq = pc164_init_irq,
.init_rtc = common_init_rtc,
.init_pci = alphapc164_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = alphapc164_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(pc164)
#endif
| gpl-2.0 |
davidgraeff/linux | crypto/ccm.c | 525 | 21923 | /*
* CCM: Counter with CBC-MAC
*
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"
struct ccm_instance_ctx {
struct crypto_skcipher_spawn ctr;
struct crypto_spawn cipher;
};
struct crypto_ccm_ctx {
struct crypto_cipher *cipher;
struct crypto_ablkcipher *ctr;
};
struct crypto_rfc4309_ctx {
struct crypto_aead *child;
u8 nonce[3];
};
struct crypto_ccm_req_priv_ctx {
u8 odata[16];
u8 idata[16];
u8 auth_tag[16];
u32 ilen;
u32 flags;
struct scatterlist src[2];
struct scatterlist dst[2];
struct ablkcipher_request abreq;
};
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
struct aead_request *req)
{
unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
memset(block, 0, csize);
block += csize;
if (csize >= 4)
csize = 4;
else if (msglen > (1 << (8 * csize)))
return -EOVERFLOW;
data = cpu_to_be32(msglen);
memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
return 0;
}
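/*
 * Worked example (illustrative): with csize == 2 and msglen == 0x1234 the
 * two length octets written are 0x12 0x34 - the low two bytes of the
 * big-endian representation - while oversized lengths are rejected above
 * with -EOVERFLOW.
 */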
static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ablkcipher *ctr = ctx->ctr;
struct crypto_cipher *tfm = ctx->cipher;
int err = 0;
crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(ctr, key, keylen);
crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
CRYPTO_TFM_RES_MASK);
if (err)
goto out;
crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(tfm, key, keylen);
crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
CRYPTO_TFM_RES_MASK);
out:
return err;
}
static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
switch (authsize) {
case 4:
case 6:
case 8:
case 10:
case 12:
case 14:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int format_input(u8 *info, struct aead_request *req,
unsigned int cryptlen)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int lp = req->iv[0];
unsigned int l = lp + 1;
unsigned int m;
m = crypto_aead_authsize(aead);
memcpy(info, req->iv, 16);
/* format control info per RFC 3610 and
* NIST Special Publication 800-38C
*/
*info |= (8 * ((m - 2) / 2));
if (req->assoclen)
*info |= 64;
return set_msg_len(info + 16 - l, cryptlen, l);
}
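/*
 * Worked example (illustrative): for an 8-byte auth tag (m == 8), iv[0] == 3
 * (so L == 4) and a non-zero assoclen, the flags octet of B_0 becomes
 * 64 | (8 * ((8 - 2) / 2)) | 3 == 0x5b, matching the Adata/M'/L' flag
 * layout of RFC 3610.
 */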
static int format_adata(u8 *adata, unsigned int a)
{
int len = 0;
/* add control info for associated data
* RFC 3610 and NIST Special Publication 800-38C
*/
if (a < 65280) {
*(__be16 *)adata = cpu_to_be16(a);
len = 2;
} else {
*(__be16 *)adata = cpu_to_be16(0xfffe);
*(__be32 *)&adata[2] = cpu_to_be32(a);
len = 6;
}
return len;
}
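/*
 * Worked examples (illustrative): an assoclen of 100 is encoded as the two
 * octets 0x00 0x64, while an assoclen of 70000 (0x11170) takes the long
 * form 0xff 0xfe 0x00 0x01 0x11 0x70.
 */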
static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
struct crypto_ccm_req_priv_ctx *pctx)
{
unsigned int bs = 16;
u8 *odata = pctx->odata;
u8 *idata = pctx->idata;
int datalen, getlen;
datalen = n;
/* first time in here, block may be partially filled. */
getlen = bs - pctx->ilen;
if (datalen >= getlen) {
memcpy(idata + pctx->ilen, data, getlen);
crypto_xor(odata, idata, bs);
crypto_cipher_encrypt_one(tfm, odata, odata);
datalen -= getlen;
data += getlen;
pctx->ilen = 0;
}
/* now encrypt rest of data */
while (datalen >= bs) {
crypto_xor(odata, data, bs);
crypto_cipher_encrypt_one(tfm, odata, odata);
datalen -= bs;
data += bs;
}
/* check and see if there's leftover data that wasn't
* enough to fill a block.
*/
if (datalen) {
memcpy(idata + pctx->ilen, data, datalen);
pctx->ilen += datalen;
}
}
static void get_data_to_compute(struct crypto_cipher *tfm,
struct crypto_ccm_req_priv_ctx *pctx,
struct scatterlist *sg, unsigned int len)
{
struct scatter_walk walk;
u8 *data_src;
int n;
scatterwalk_start(&walk, sg);
while (len) {
n = scatterwalk_clamp(&walk, len);
if (!n) {
scatterwalk_start(&walk, sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
data_src = scatterwalk_map(&walk);
compute_mac(tfm, data_src, n, pctx);
len -= n;
scatterwalk_unmap(data_src);
scatterwalk_advance(&walk, n);
scatterwalk_done(&walk, 0, len);
if (len)
crypto_yield(pctx->flags);
}
/* any leftover needs padding and then encrypted */
if (pctx->ilen) {
int padlen;
u8 *odata = pctx->odata;
u8 *idata = pctx->idata;
padlen = 16 - pctx->ilen;
memset(idata + pctx->ilen, 0, padlen);
crypto_xor(odata, idata, 16);
crypto_cipher_encrypt_one(tfm, odata, odata);
pctx->ilen = 0;
}
}
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
unsigned int cryptlen)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_cipher *cipher = ctx->cipher;
unsigned int assoclen = req->assoclen;
u8 *odata = pctx->odata;
u8 *idata = pctx->idata;
int err;
/* format control data for input */
err = format_input(odata, req, cryptlen);
if (err)
goto out;
/* encrypt first block to use as start in computing mac */
crypto_cipher_encrypt_one(cipher, odata, odata);
/* format associated data and compute into mac */
if (assoclen) {
pctx->ilen = format_adata(idata, assoclen);
get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
} else {
pctx->ilen = 0;
}
/* compute plaintext into mac */
if (cryptlen)
get_data_to_compute(cipher, pctx, plain, cryptlen);
out:
return err;
}
static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
u8 *odata = pctx->odata;
if (!err)
scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
aead_request_complete(req, err);
}
static inline int crypto_ccm_check_iv(const u8 *iv)
{
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (1 > iv[0] || iv[0] > 7)
return -EINVAL;
return 0;
}
static int crypto_ccm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->abreq;
struct scatterlist *dst;
unsigned int cryptlen = req->cryptlen;
u8 *odata = pctx->odata;
u8 *iv = req->iv;
int err;
err = crypto_ccm_check_iv(iv);
if (err)
return err;
pctx->flags = aead_request_flags(req);
err = crypto_ccm_auth(req, req->src, cryptlen);
if (err)
return err;
/* Note: rfc 3610 and NIST 800-38C require counter of
* zero to encrypt auth tag.
*/
memset(iv + 15 - iv[0], 0, iv[0] + 1);
sg_init_table(pctx->src, 2);
sg_set_buf(pctx->src, odata, 16);
scatterwalk_sg_chain(pctx->src, 2, req->src);
dst = pctx->src;
if (req->src != req->dst) {
sg_init_table(pctx->dst, 2);
sg_set_buf(pctx->dst, odata, 16);
scatterwalk_sg_chain(pctx->dst, 2, req->dst);
dst = pctx->dst;
}
ablkcipher_request_set_tfm(abreq, ctx->ctr);
ablkcipher_request_set_callback(abreq, pctx->flags,
crypto_ccm_encrypt_done, req);
ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
err = crypto_ablkcipher_encrypt(abreq);
if (err)
return err;
/* copy authtag to end of dst */
scatterwalk_map_and_copy(odata, req->dst, cryptlen,
crypto_aead_authsize(aead), 1);
return err;
}
static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen - authsize;
if (!err) {
err = crypto_ccm_auth(req, req->dst, cryptlen);
if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
err = -EBADMSG;
}
aead_request_complete(req, err);
}
static int crypto_ccm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->abreq;
struct scatterlist *dst;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata;
u8 *iv = req->iv;
int err;
if (cryptlen < authsize)
return -EINVAL;
cryptlen -= authsize;
err = crypto_ccm_check_iv(iv);
if (err)
return err;
pctx->flags = aead_request_flags(req);
scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
memset(iv + 15 - iv[0], 0, iv[0] + 1);
sg_init_table(pctx->src, 2);
sg_set_buf(pctx->src, authtag, 16);
scatterwalk_sg_chain(pctx->src, 2, req->src);
dst = pctx->src;
if (req->src != req->dst) {
sg_init_table(pctx->dst, 2);
sg_set_buf(pctx->dst, authtag, 16);
scatterwalk_sg_chain(pctx->dst, 2, req->dst);
dst = pctx->dst;
}
ablkcipher_request_set_tfm(abreq, ctx->ctr);
ablkcipher_request_set_callback(abreq, pctx->flags,
crypto_ccm_decrypt_done, req);
ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
err = crypto_ablkcipher_decrypt(abreq);
if (err)
return err;
err = crypto_ccm_auth(req, req->dst, cryptlen);
if (err)
return err;
/* verify */
if (crypto_memneq(authtag, odata, authsize))
return -EBADMSG;
return err;
}
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
struct crypto_ablkcipher *ctr;
unsigned long align;
int err;
cipher = crypto_spawn_cipher(&ictx->cipher);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_cipher;
ctx->cipher = cipher;
ctx->ctr = ctr;
align = crypto_tfm_alg_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
tfm->crt_aead.reqsize = align +
sizeof(struct crypto_ccm_req_priv_ctx) +
crypto_ablkcipher_reqsize(ctr);
return 0;
err_free_cipher:
crypto_free_cipher(cipher);
return err;
}
static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->cipher);
crypto_free_ablkcipher(ctx->ctr);
}
static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
const char *full_name,
const char *ctr_name,
const char *cipher_name)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct crypto_alg *ctr;
struct crypto_alg *cipher;
struct ccm_instance_ctx *ictx;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(cipher))
return ERR_CAST(cipher);
err = -EINVAL;
if (cipher->cra_blocksize != 16)
goto out_put_cipher;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
err = -ENOMEM;
if (!inst)
goto out_put_cipher;
ictx = crypto_instance_ctx(inst);
err = crypto_init_spawn(&ictx->cipher, cipher, inst,
CRYPTO_ALG_TYPE_MASK);
if (err)
goto err_free_inst;
crypto_set_skcipher_spawn(&ictx->ctr, inst);
err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
goto err_drop_cipher;
ctr = crypto_skcipher_spawn_alg(&ictx->ctr);
/* Not a stream cipher? */
err = -EINVAL;
if (ctr->cra_blocksize != 1)
goto err_drop_ctr;
/* We want the real thing! */
if (ctr->cra_ablkcipher.ivsize != 16)
goto err_drop_ctr;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"ccm_base(%s,%s)", ctr->cra_driver_name,
cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_ctr;
memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
inst->alg.cra_blocksize = 1;
inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
(__alignof__(u32) - 1);
inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = 16;
inst->alg.cra_aead.maxauthsize = 16;
inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
inst->alg.cra_init = crypto_ccm_init_tfm;
inst->alg.cra_exit = crypto_ccm_exit_tfm;
inst->alg.cra_aead.setkey = crypto_ccm_setkey;
inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
out:
crypto_mod_put(cipher);
return inst;
err_drop_ctr:
crypto_drop_skcipher(&ictx->ctr);
err_drop_cipher:
crypto_drop_spawn(&ictx->cipher);
err_free_inst:
kfree(inst);
out_put_cipher:
inst = ERR_PTR(err);
goto out;
}
static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return ERR_CAST(cipher_name);
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
}
static void crypto_ccm_free(struct crypto_instance *inst)
{
struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_spawn(&ctx->cipher);
crypto_drop_skcipher(&ctx->ctr);
kfree(inst);
}
static struct crypto_template crypto_ccm_tmpl = {
.name = "ccm",
.alloc = crypto_ccm_alloc,
.free = crypto_ccm_free,
.module = THIS_MODULE,
};
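/*
 * Illustrative note: a request for "ccm(aes)" is resolved by
 * crypto_ccm_alloc() above into the composite "ccm_base(ctr(aes),aes)" -
 * the CTR instance supplies the keystream and the bare cipher drives the
 * CBC-MAC.
 */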
static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
{
const char *ctr_name;
const char *cipher_name;
char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
return ERR_CAST(ctr_name);
cipher_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(cipher_name))
return ERR_CAST(cipher_name);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
}
static struct crypto_template crypto_ccm_base_tmpl = {
.name = "ccm_base",
.alloc = crypto_ccm_base_alloc,
.free = crypto_ccm_free,
.module = THIS_MODULE,
};
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
int err;
if (keylen < 3)
return -EINVAL;
keylen -= 3;
memcpy(ctx->nonce, key + keylen, 3);
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(child, key, keylen);
crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
switch (authsize) {
case 8:
case 12:
case 16:
break;
default:
return -EINVAL;
}
return crypto_aead_setauthsize(ctx->child, authsize);
}
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_aead *child = ctx->child;
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
crypto_aead_alignmask(child) + 1);
/* L' */
iv[0] = 3;
memcpy(iv + 1, ctx->nonce, 3);
memcpy(iv + 4, req->iv, 8);
aead_request_set_tfm(subreq, child);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
aead_request_set_assoc(subreq, req->assoc, req->assoclen);
return subreq;
}
static int crypto_rfc4309_encrypt(struct aead_request *req)
{
req = crypto_rfc4309_crypt(req);
return crypto_aead_encrypt(req);
}
static int crypto_rfc4309_decrypt(struct aead_request *req)
{
req = crypto_rfc4309_crypt(req);
return crypto_aead_decrypt(req);
}
static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_aead *aead;
unsigned long align;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
ctx->child = aead;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
tfm->crt_aead.reqsize = sizeof(struct aead_request) +
ALIGN(crypto_aead_reqsize(aead),
crypto_tfm_ctx_alignment()) +
align + 16;
return 0;
}
static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_aead(ctx->child);
}
static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct crypto_aead_spawn *spawn;
struct crypto_alg *alg;
const char *ccm_name;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_CAST(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
ccm_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ccm_name))
return ERR_CAST(ccm_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return ERR_PTR(-ENOMEM);
spawn = crypto_instance_ctx(inst);
crypto_set_aead_spawn(spawn, inst);
err = crypto_grab_aead(spawn, ccm_name, 0,
crypto_requires_sync(algt->type, algt->mask));
if (err)
goto out_free_inst;
alg = crypto_aead_spawn_alg(spawn);
err = -EINVAL;
/* We only support 16-byte blocks. */
if (alg->cra_aead.ivsize != 16)
goto out_drop_alg;
/* Not a stream cipher? */
if (alg->cra_blocksize != 1)
goto out_drop_alg;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
"rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4309(%s)", alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_drop_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = 1;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_nivaead_type;
inst->alg.cra_aead.ivsize = 8;
inst->alg.cra_aead.maxauthsize = 16;
inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
inst->alg.cra_init = crypto_rfc4309_init_tfm;
inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
inst->alg.cra_aead.geniv = "seqiv";
out:
return inst;
out_drop_alg:
crypto_drop_aead(spawn);
out_free_inst:
kfree(inst);
inst = ERR_PTR(err);
goto out;
}
static void crypto_rfc4309_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_rfc4309_tmpl = {
.name = "rfc4309",
.alloc = crypto_rfc4309_alloc,
.free = crypto_rfc4309_free,
.module = THIS_MODULE,
};
static int __init crypto_ccm_module_init(void)
{
int err;
err = crypto_register_template(&crypto_ccm_base_tmpl);
if (err)
goto out;
err = crypto_register_template(&crypto_ccm_tmpl);
if (err)
goto out_undo_base;
err = crypto_register_template(&crypto_rfc4309_tmpl);
if (err)
goto out_undo_ccm;
out:
return err;
out_undo_ccm:
crypto_unregister_template(&crypto_ccm_tmpl);
out_undo_base:
crypto_unregister_template(&crypto_ccm_base_tmpl);
goto out;
}
static void __exit crypto_ccm_module_exit(void)
{
crypto_unregister_template(&crypto_rfc4309_tmpl);
crypto_unregister_template(&crypto_ccm_tmpl);
crypto_unregister_template(&crypto_ccm_base_tmpl);
}
module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Counter with CBC MAC");
MODULE_ALIAS("ccm_base");
MODULE_ALIAS("rfc4309");
| gpl-2.0 |
liusen09003110-163-com/openwrt_14.07 | tools/firmware-utils/src/mkheader_gemtek.c | 525 | 5075 | /*
* Copyright (C) 2014 Claudio Leite <leitec@staticky.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Builds a proper flash image for routers using some Gemtek
* OEM boards. These include the Airlink101 AR725W, the
* Asante SmartHub 600 (AWRT-600N), and Linksys WRT100/110.
*
* The resulting image is compatible with the factory firmware
* web upgrade and TFTP interface.
*
* To build:
* gcc -O2 -o mkheader_gemtek mkheader_gemtek.c -lz
*
* Claudio Leite <leitec@staticky.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <zlib.h> /* for crc32() */
/*
* The header is in little-endian format. In case
* we are on a BE host, we need to swap binary
* values.
*/
#ifdef __APPLE__
# include <libkern/OSByteOrder.h>
# define le32 OSSwapHostToLittleInt32
#else
# if defined(__linux__)
# include <endian.h>
# if __BYTE_ORDER == __BIG_ENDIAN
# define CPU_BIG_ENDIAN
# endif
# else
# include <sys/endian.h> /* BSD's should have this */
# if _BYTE_ORDER == _BIG_ENDIAN
# define CPU_BIG_ENDIAN
# endif
# endif
# ifdef CPU_BIG_ENDIAN
# define le32(x) (((x & 0xff000000) >> 24) | \
((x & 0x00ff0000) >> 8) | \
((x & 0x0000ff00) << 8) | \
((x & 0x000000ff) << 24))
# else
# define le32(x) (x)
# endif
#endif
struct gemtek_header {
uint8_t magic[4];
uint8_t version[4];
uint32_t product_id;
uint32_t imagesz;
uint32_t checksum;
uint32_t fast_checksum;
uint8_t build[4];
uint8_t lang[4];
};
#define HDRLEN sizeof(struct gemtek_header)
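/*
 * Resulting on-flash header layout (32 bytes, multi-byte fields
 * little-endian; offsets derived from the struct above, shown for
 * illustration):
 *   0x00 magic[4]    0x04 version[4]       0x08 product_id  0x0c imagesz
 *   0x10 checksum    0x14 fast_checksum    0x18 build[4]    0x1c lang[4]
 */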
struct machines {
char *desc;
char *id;
uint32_t maxsize;
struct gemtek_header header;
};
struct machines mach_def[] = {
{"Airlink101 AR725W", "ar725w", 0x340000,
{"GMTK", "1003", le32(0x03000001), 0, 0,
0, "01\0\0", "EN\0\0"}},
{"Asante AWRT-600N", "awrt600n", 0x340000,
{"A600", "1005", le32(0x03000001), 0, 0,
0, "01\0\0", "EN\0\0"}},
{"Linksys WRT100", "wrt100", 0x320000,
{"GMTK", "1007", le32(0x03040001), 0, 0,
0, "2\0\0\0", "EN\0\0"}},
{"Linksys WRT110", "wrt110", 0x320000,
{"GMTK", "1007", le32(0x03040001), 0, 0,
0, "2\0\0\0", "EN\0\0"}},
{0}
};
int
main(int argc, char *argv[])
{
unsigned long res, flen;
struct gemtek_header my_hdr;
FILE *f, *f_out;
int image_type = -1, index;
uint8_t *buf;
uint32_t crc;
if (argc < 3) {
fprintf(stderr, "mkheader_gemtek <uImage> <webflash image> [machine ID]\n");
fprintf(stderr, " where [machine ID] is one of:\n");
for (index = 0; mach_def[index].desc != 0; index++) {
fprintf(stderr, " %-10s %s", mach_def[index].id, mach_def[index].desc);
if (index == 0)
fprintf(stderr, " (default)\n");
else
fprintf(stderr, "\n");
}
exit(-1);
}
if (argc == 4) {
for(index = 0; mach_def[index].id != 0; index++) {
if(strcmp(mach_def[index].id, argv[3]) == 0) {
image_type = index;
break;
}
}
if(image_type == -1) {
fprintf(stderr, "\nERROR: invalid machine type\n");
exit(-1);
}
} else
image_type = 0;
printf("Opening %s...\n", argv[1]);
f = fopen(argv[1], "r");
if(!f) {
fprintf(stderr, "\nERROR: couldn't open input image\n");
exit(-1);
}
fseek(f, 0, SEEK_END);
flen = (unsigned long) ftell(f);
printf(" %lu (0x%lX) bytes long\n", flen, flen);
if (flen > mach_def[image_type].maxsize) {
fprintf(stderr, "\nERROR: image exceeds maximum compatible size\n");
goto f_error;
}
buf = malloc(flen + HDRLEN);
if (!buf) {
fprintf(stderr, "\nERROR: couldn't allocate buffer\n");
goto f_error;
}
rewind(f);
res = fread(buf + HDRLEN, 1, flen, f);
if (res != flen) {
perror("Couldn't read entire file: fread()");
goto f_error;
}
fclose(f);
printf("\nCreating %s...\n", argv[2]);
memcpy(&my_hdr, &mach_def[image_type].header, HDRLEN);
printf(" Using %s magic\n", mach_def[image_type].desc);
my_hdr.imagesz = le32(flen + HDRLEN);
memcpy(my_hdr.lang, "EN", 2);
memcpy(buf, &my_hdr, HDRLEN);
crc = crc32(0, buf, flen + HDRLEN);
printf(" CRC32: %08X\n", crc);
my_hdr.checksum = le32(crc);
memcpy(buf, &my_hdr, HDRLEN);
printf(" Writing...\n");
f_out = fopen(argv[2], "w");
if(!f_out) {
fprintf(stderr, "\nERROR: couldn't open output image\n");
exit(-1);
}
fwrite(buf, 1, flen + HDRLEN, f_out);
fclose(f_out);
free(buf);
return 0;
f_error:
fclose(f);
exit(-1);
}
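/*
 * Illustrative verification sketch (not part of the original tool; the
 * function name is hypothetical and it is never called here).  The CRC in
 * the header is computed over the whole image while the checksum and
 * fast_checksum fields are still zero, so a checker must clear them before
 * recomputing:
 */
static int gemtek_verify(uint8_t *img, unsigned long len)
{
	struct gemtek_header hdr;
	uint32_t stored;

	memcpy(&hdr, img, HDRLEN);
	stored = hdr.checksum;
	hdr.checksum = 0;
	hdr.fast_checksum = 0;
	memcpy(img, &hdr, HDRLEN);

	return crc32(0, img, len) == le32(stored) ? 0 : -1;
}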
| gpl-2.0 |
anasanzari/Cowcopy | drivers/isdn/hisax/isdnl3.c | 1293 | 13050 | /* $Id: isdnl3.c,v 2.22.2.3 2004/01/13 14:31:25 keil Exp $
*
* Author Karsten Keil
* based on the teles driver from Jan den Ouden
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
* Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
*
*/
#include <linux/init.h>
#include <linux/slab.h>
#include "hisax.h"
#include "isdnl3.h"
const char *l3_revision = "$Revision: 2.22.2.3 $";
static struct Fsm l3fsm;
enum {
ST_L3_LC_REL,
ST_L3_LC_ESTAB_WAIT,
ST_L3_LC_REL_DELAY,
ST_L3_LC_REL_WAIT,
ST_L3_LC_ESTAB,
};
#define L3_STATE_COUNT (ST_L3_LC_ESTAB + 1)
static char *strL3State[] =
{
"ST_L3_LC_REL",
"ST_L3_LC_ESTAB_WAIT",
"ST_L3_LC_REL_DELAY",
"ST_L3_LC_REL_WAIT",
"ST_L3_LC_ESTAB",
};
enum {
EV_ESTABLISH_REQ,
EV_ESTABLISH_IND,
EV_ESTABLISH_CNF,
EV_RELEASE_REQ,
EV_RELEASE_CNF,
EV_RELEASE_IND,
EV_TIMEOUT,
};
#define L3_EVENT_COUNT (EV_TIMEOUT + 1)
static char *strL3Event[] =
{
"EV_ESTABLISH_REQ",
"EV_ESTABLISH_IND",
"EV_ESTABLISH_CNF",
"EV_RELEASE_REQ",
"EV_RELEASE_CNF",
"EV_RELEASE_IND",
"EV_TIMEOUT",
};
static __printf(2, 3) void
l3m_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
struct PStack *st = fi->userdata;
va_start(args, fmt);
VHiSax_putstatus(st->l1.hardware, st->l3.debug_id, fmt, args);
va_end(args);
}
u_char *
findie(u_char *p, int size, u_char ie, int wanted_set)
{
int l, codeset, maincodeset;
u_char *pend = p + size;
/* skip protocol discriminator, callref and message type */
p++;
l = (*p++) & 0xf;
p += l;
p++;
codeset = 0;
maincodeset = 0;
/* while there are bytes left... */
while (p < pend) {
if ((*p & 0xf0) == 0x90) {
codeset = *p & 0x07;
if (!(*p & 0x08))
maincodeset = codeset;
}
if (*p & 0x80)
p++;
else {
if (codeset == wanted_set) {
if (*p == ie)
{ /* improved length check (Werner Cornelius) */
if ((pend - p) < 2)
return (NULL);
if (*(p + 1) > (pend - (p + 2)))
return (NULL);
return (p);
}
if (*p > ie)
return (NULL);
}
p++;
l = *p++;
p += l;
codeset = maincodeset;
}
}
return (NULL);
}
int
getcallref(u_char *p)
{
int l, cr = 0;
p++; /* prot discr */
if (*p & 0xfe) /* wrong callref BRI only 1 octet*/
return (-2);
l = 0xf & *p++; /* callref length */
if (!l) /* dummy CallRef */
return (-1);
cr = *p++;
return (cr);
}
static int OrigCallRef = 0;
int
newcallref(void)
{
if (OrigCallRef == 127)
OrigCallRef = 1;
else
OrigCallRef++;
return (OrigCallRef);
}
void
newl3state(struct l3_process *pc, int state)
{
if (pc->debug & L3_DEB_STATE)
l3_debug(pc->st, "%s cr %d %d --> %d", __func__,
pc->callref & 0x7F,
pc->state, state);
pc->state = state;
}
static void
L3ExpireTimer(struct L3Timer *t)
{
t->pc->st->lli.l4l3(t->pc->st, t->event, t->pc);
}
void
L3InitTimer(struct l3_process *pc, struct L3Timer *t)
{
t->pc = pc;
t->tl.function = (void *) L3ExpireTimer;
t->tl.data = (long) t;
init_timer(&t->tl);
}
void
L3DelTimer(struct L3Timer *t)
{
del_timer(&t->tl);
}
int
L3AddTimer(struct L3Timer *t,
int millisec, int event)
{
if (timer_pending(&t->tl)) {
printk(KERN_WARNING "L3AddTimer: timer already active!\n");
return -1;
}
init_timer(&t->tl);
t->event = event;
t->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&t->tl);
return 0;
}
void
StopAllL3Timer(struct l3_process *pc)
{
L3DelTimer(&pc->timer);
}
struct sk_buff *
l3_alloc_skb(int len)
{
struct sk_buff *skb;
if (!(skb = alloc_skb(len + MAX_HEADER_LEN, GFP_ATOMIC))) {
printk(KERN_WARNING "HiSax: No skb for D-channel\n");
return (NULL);
}
skb_reserve(skb, MAX_HEADER_LEN);
return (skb);
}
static void
no_l3_proto(struct PStack *st, int pr, void *arg)
{
struct sk_buff *skb = arg;
HiSax_putstatus(st->l1.hardware, "L3", "no D protocol");
if (skb) {
dev_kfree_skb(skb);
}
}
static int
no_l3_proto_spec(struct PStack *st, isdn_ctrl *ic)
{
printk(KERN_WARNING "HiSax: no specific protocol handler for proto %lu\n", ic->arg & 0xFF);
return (-1);
}
struct l3_process
*getl3proc(struct PStack *st, int cr)
{
struct l3_process *p = st->l3.proc;
while (p)
if (p->callref == cr)
return (p);
else
p = p->next;
return (NULL);
}
struct l3_process
*new_l3_process(struct PStack *st, int cr)
{
struct l3_process *p, *np;
if (!(p = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) {
printk(KERN_ERR "HiSax can't get memory for cr %d\n", cr);
return (NULL);
}
if (!st->l3.proc)
st->l3.proc = p;
else {
np = st->l3.proc;
while (np->next)
np = np->next;
np->next = p;
}
p->next = NULL;
p->debug = st->l3.debug;
p->callref = cr;
p->state = 0;
p->chan = NULL;
p->st = st;
p->N303 = st->l3.N303;
L3InitTimer(p, &p->timer);
return (p);
};
void
release_l3_process(struct l3_process *p)
{
struct l3_process *np, *pp = NULL;
if (!p)
return;
np = p->st->l3.proc;
while (np) {
if (np == p) {
StopAllL3Timer(p);
if (pp)
pp->next = np->next;
else if (!(p->st->l3.proc = np->next) &&
!test_bit(FLG_PTP, &p->st->l2.flag)) {
if (p->debug)
l3_debug(p->st, "release_l3_process: last process");
if (skb_queue_empty(&p->st->l3.squeue)) {
if (p->debug)
l3_debug(p->st, "release_l3_process: release link");
if (p->st->protocol != ISDN_PTYPE_NI1)
FsmEvent(&p->st->l3.l3m, EV_RELEASE_REQ, NULL);
else
FsmEvent(&p->st->l3.l3m, EV_RELEASE_IND, NULL);
} else {
if (p->debug)
l3_debug(p->st, "release_l3_process: not release link");
}
}
kfree(p);
return;
}
pp = np;
np = np->next;
}
printk(KERN_ERR "HiSax internal L3 error CR(%d) not in list\n", p->callref);
l3_debug(p->st, "HiSax internal L3 error CR(%d) not in list", p->callref);
};
static void
l3ml3p(struct PStack *st, int pr)
{
struct l3_process *p = st->l3.proc;
struct l3_process *np;
while (p) {
/* p might be kfreed under us, so we need to save where we want to go on */
np = p->next;
st->l3.l3ml3(st, pr, p);
p = np;
}
}
void
setstack_l3dc(struct PStack *st, struct Channel *chanp)
{
char tmp[64];
st->l3.proc = NULL;
st->l3.global = NULL;
skb_queue_head_init(&st->l3.squeue);
st->l3.l3m.fsm = &l3fsm;
st->l3.l3m.state = ST_L3_LC_REL;
st->l3.l3m.debug = 1;
st->l3.l3m.userdata = st;
st->l3.l3m.userint = 0;
st->l3.l3m.printdebug = l3m_debug;
FsmInitTimer(&st->l3.l3m, &st->l3.l3m_timer);
strcpy(st->l3.debug_id, "L3DC ");
st->lli.l4l3_proto = no_l3_proto_spec;
#ifdef CONFIG_HISAX_EURO
if (st->protocol == ISDN_PTYPE_EURO) {
setstack_dss1(st);
} else
#endif
#ifdef CONFIG_HISAX_NI1
if (st->protocol == ISDN_PTYPE_NI1) {
setstack_ni1(st);
} else
#endif
#ifdef CONFIG_HISAX_1TR6
if (st->protocol == ISDN_PTYPE_1TR6) {
setstack_1tr6(st);
} else
#endif
if (st->protocol == ISDN_PTYPE_LEASED) {
st->lli.l4l3 = no_l3_proto;
st->l2.l2l3 = no_l3_proto;
st->l3.l3ml3 = no_l3_proto;
printk(KERN_INFO "HiSax: Leased line mode\n");
} else {
st->lli.l4l3 = no_l3_proto;
st->l2.l2l3 = no_l3_proto;
st->l3.l3ml3 = no_l3_proto;
sprintf(tmp, "protocol %s not supported",
(st->protocol == ISDN_PTYPE_1TR6) ? "1tr6" :
(st->protocol == ISDN_PTYPE_EURO) ? "euro" :
(st->protocol == ISDN_PTYPE_NI1) ? "ni1" :
"unknown");
printk(KERN_WARNING "HiSax: %s\n", tmp);
st->protocol = -1;
}
}
static void
isdnl3_trans(struct PStack *st, int pr, void *arg) {
st->l3.l3l2(st, pr, arg);
}
void
releasestack_isdnl3(struct PStack *st)
{
while (st->l3.proc)
release_l3_process(st->l3.proc);
if (st->l3.global) {
StopAllL3Timer(st->l3.global);
kfree(st->l3.global);
st->l3.global = NULL;
}
FsmDelTimer(&st->l3.l3m_timer, 54);
skb_queue_purge(&st->l3.squeue);
}
void
setstack_l3bc(struct PStack *st, struct Channel *chanp)
{
st->l3.proc = NULL;
st->l3.global = NULL;
skb_queue_head_init(&st->l3.squeue);
st->l3.l3m.fsm = &l3fsm;
st->l3.l3m.state = ST_L3_LC_REL;
st->l3.l3m.debug = 1;
st->l3.l3m.userdata = st;
st->l3.l3m.userint = 0;
st->l3.l3m.printdebug = l3m_debug;
strcpy(st->l3.debug_id, "L3BC ");
st->lli.l4l3 = isdnl3_trans;
}
#define DREL_TIMER_VALUE 40000
static void
lc_activate(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_ESTAB_WAIT);
st->l3.l3l2(st, DL_ESTABLISH | REQUEST, NULL);
}
static void
lc_connect(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
struct sk_buff *skb = arg;
int dequeued = 0;
FsmChangeState(fi, ST_L3_LC_ESTAB);
while ((skb = skb_dequeue(&st->l3.squeue))) {
st->l3.l3l2(st, DL_DATA | REQUEST, skb);
dequeued++;
}
if ((!st->l3.proc) && dequeued) {
if (st->l3.debug)
l3_debug(st, "lc_connect: release link");
FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
} else
l3ml3p(st, DL_ESTABLISH | INDICATION);
}
static void
lc_connected(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
struct sk_buff *skb = arg;
int dequeued = 0;
FsmDelTimer(&st->l3.l3m_timer, 51);
FsmChangeState(fi, ST_L3_LC_ESTAB);
while ((skb = skb_dequeue(&st->l3.squeue))) {
st->l3.l3l2(st, DL_DATA | REQUEST, skb);
dequeued++;
}
if ((!st->l3.proc) && dequeued) {
if (st->l3.debug)
l3_debug(st, "lc_connected: release link");
FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
} else
l3ml3p(st, DL_ESTABLISH | CONFIRM);
}
static void
lc_start_delay(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_REL_DELAY);
FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
}
static void
lc_start_delay_check(struct FsmInst *fi, int event, void *arg)
/* 20/09/00 - GE timer not used for NI-1 as layer 2 should stay up */
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_REL_DELAY);
/* 19/09/00 - GE timer not used for NI-1 */
if (st->protocol != ISDN_PTYPE_NI1)
FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
}
static void
lc_release_req(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
if (test_bit(FLG_L2BLOCK, &st->l2.flag)) {
if (st->l3.debug)
l3_debug(st, "lc_release_req: l2 blocked");
/* restart release timer */
FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 51);
} else {
FsmChangeState(fi, ST_L3_LC_REL_WAIT);
st->l3.l3l2(st, DL_RELEASE | REQUEST, NULL);
}
}
static void
lc_release_ind(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmDelTimer(&st->l3.l3m_timer, 52);
FsmChangeState(fi, ST_L3_LC_REL);
skb_queue_purge(&st->l3.squeue);
l3ml3p(st, DL_RELEASE | INDICATION);
}
static void
lc_release_cnf(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_REL);
skb_queue_purge(&st->l3.squeue);
l3ml3p(st, DL_RELEASE | CONFIRM);
}
/* *INDENT-OFF* */
static struct FsmNode L3FnList[] __initdata =
{
{ST_L3_LC_REL, EV_ESTABLISH_REQ, lc_activate},
{ST_L3_LC_REL, EV_ESTABLISH_IND, lc_connect},
{ST_L3_LC_REL, EV_ESTABLISH_CNF, lc_connect},
{ST_L3_LC_ESTAB_WAIT, EV_ESTABLISH_CNF, lc_connected},
{ST_L3_LC_ESTAB_WAIT, EV_RELEASE_REQ, lc_start_delay},
{ST_L3_LC_ESTAB_WAIT, EV_RELEASE_IND, lc_release_ind},
{ST_L3_LC_ESTAB, EV_RELEASE_IND, lc_release_ind},
{ST_L3_LC_ESTAB, EV_RELEASE_REQ, lc_start_delay_check},
{ST_L3_LC_REL_DELAY, EV_RELEASE_IND, lc_release_ind},
{ST_L3_LC_REL_DELAY, EV_ESTABLISH_REQ, lc_connected},
{ST_L3_LC_REL_DELAY, EV_TIMEOUT, lc_release_req},
{ST_L3_LC_REL_WAIT, EV_RELEASE_CNF, lc_release_cnf},
{ST_L3_LC_REL_WAIT, EV_ESTABLISH_REQ, lc_activate},
};
/* *INDENT-ON* */
void
l3_msg(struct PStack *st, int pr, void *arg)
{
switch (pr) {
case (DL_DATA | REQUEST):
if (st->l3.l3m.state == ST_L3_LC_ESTAB) {
st->l3.l3l2(st, pr, arg);
} else {
struct sk_buff *skb = arg;
skb_queue_tail(&st->l3.squeue, skb);
FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
}
break;
case (DL_ESTABLISH | REQUEST):
FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
break;
case (DL_ESTABLISH | CONFIRM):
FsmEvent(&st->l3.l3m, EV_ESTABLISH_CNF, NULL);
break;
case (DL_ESTABLISH | INDICATION):
FsmEvent(&st->l3.l3m, EV_ESTABLISH_IND, NULL);
break;
case (DL_RELEASE | INDICATION):
FsmEvent(&st->l3.l3m, EV_RELEASE_IND, NULL);
break;
case (DL_RELEASE | CONFIRM):
FsmEvent(&st->l3.l3m, EV_RELEASE_CNF, NULL);
break;
case (DL_RELEASE | REQUEST):
FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
break;
}
}
int __init
Isdnl3New(void)
{
l3fsm.state_count = L3_STATE_COUNT;
l3fsm.event_count = L3_EVENT_COUNT;
l3fsm.strEvent = strL3Event;
l3fsm.strState = strL3State;
return FsmNew(&l3fsm, L3FnList, ARRAY_SIZE(L3FnList));
}
void
Isdnl3Free(void)
{
FsmFree(&l3fsm);
}
| gpl-2.0 |
friedrich420/Galaxy-S6-Edge-AEL-Kernel-G925F- | drivers/mfd/pcf50633-adc.c | 2317 | 6029 | /* NXP PCF50633 ADC Driver
*
* (C) 2006-2008 by Openmoko, Inc.
* Author: Balaji Rao <balajirrao@openmoko.org>
* All rights reserved.
*
* Broken down from monstrous PCF50633 driver mainly by
* Harald Welte, Andy Green and Werner Almesberger
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* NOTE: This driver does not yet support subtractive ADC mode, which means
* you can do only one measurement per read request.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/adc.h>
struct pcf50633_adc_request {
int mux;
int avg;
void (*callback)(struct pcf50633 *, void *, int);
void *callback_param;
};
struct pcf50633_adc_sync_request {
int result;
struct completion completion;
};
#define PCF50633_MAX_ADC_FIFO_DEPTH 8
struct pcf50633_adc {
struct pcf50633 *pcf;
/* Private stuff */
struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH];
int queue_head;
int queue_tail;
struct mutex queue_mutex;
};
static inline struct pcf50633_adc *__to_adc(struct pcf50633 *pcf)
{
return platform_get_drvdata(pcf->adc_pdev);
}
static void adc_setup(struct pcf50633 *pcf, int channel, int avg)
{
channel &= PCF50633_ADCC1_ADCMUX_MASK;
/* kill ratiometric, but enable ACCSW biasing */
pcf50633_reg_write(pcf, PCF50633_REG_ADCC2, 0x00);
pcf50633_reg_write(pcf, PCF50633_REG_ADCC3, 0x01);
/* start ADC conversion on selected channel */
pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg |
PCF50633_ADCC1_ADCSTART | PCF50633_ADCC1_RES_10BIT);
}
static void trigger_next_adc_job_if_any(struct pcf50633 *pcf)
{
struct pcf50633_adc *adc = __to_adc(pcf);
int head;
head = adc->queue_head;
if (!adc->queue[head])
return;
adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
}
static int
adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
{
struct pcf50633_adc *adc = __to_adc(pcf);
int head, tail;
mutex_lock(&adc->queue_mutex);
head = adc->queue_head;
tail = adc->queue_tail;
if (adc->queue[tail]) {
mutex_unlock(&adc->queue_mutex);
dev_err(pcf->dev, "ADC queue is full, dropping request\n");
return -EBUSY;
}
adc->queue[tail] = req;
if (head == tail)
trigger_next_adc_job_if_any(pcf);
adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
mutex_unlock(&adc->queue_mutex);
return 0;
}
static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
int result)
{
struct pcf50633_adc_sync_request *req = param;
req->result = result;
complete(&req->completion);
}
int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
{
struct pcf50633_adc_sync_request req;
int ret;
init_completion(&req.completion);
ret = pcf50633_adc_async_read(pcf, mux, avg,
pcf50633_adc_sync_read_callback, &req);
if (ret)
return ret;
wait_for_completion(&req.completion);
return req.result;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
void (*callback)(struct pcf50633 *, void *, int),
void *callback_param)
{
struct pcf50633_adc_request *req;
/* req is freed when the result is ready, in interrupt handler */
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->mux = mux;
req->avg = avg;
req->callback = callback;
req->callback_param = callback_param;
return adc_enqueue_request(pcf, req);
}
EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
static int adc_result(struct pcf50633 *pcf)
{
u8 adcs1, adcs3;
u16 result;
adcs1 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS1);
adcs3 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS3);
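/* assemble the 10-bit result: ADCS1 holds the upper 8 bits, ADCS3 the lower 2 */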
result = (adcs1 << 2) | (adcs3 & PCF50633_ADCS3_ADCDAT1L_MASK);
dev_dbg(pcf->dev, "adc result = %d\n", result);
return result;
}
static void pcf50633_adc_irq(int irq, void *data)
{
struct pcf50633_adc *adc = data;
struct pcf50633 *pcf = adc->pcf;
struct pcf50633_adc_request *req;
int head, res;
mutex_lock(&adc->queue_mutex);
head = adc->queue_head;
req = adc->queue[head];
if (WARN_ON(!req)) {
dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n");
mutex_unlock(&adc->queue_mutex);
return;
}
adc->queue[head] = NULL;
adc->queue_head = (head + 1) &
(PCF50633_MAX_ADC_FIFO_DEPTH - 1);
res = adc_result(pcf);
trigger_next_adc_job_if_any(pcf);
mutex_unlock(&adc->queue_mutex);
req->callback(pcf, req->callback_param, res);
kfree(req);
}
static int pcf50633_adc_probe(struct platform_device *pdev)
{
struct pcf50633_adc *adc;
adc = kzalloc(sizeof(*adc), GFP_KERNEL);
if (!adc)
return -ENOMEM;
adc->pcf = dev_to_pcf50633(pdev->dev.parent);
platform_set_drvdata(pdev, adc);
pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
pcf50633_adc_irq, adc);
mutex_init(&adc->queue_mutex);
return 0;
}
static int pcf50633_adc_remove(struct platform_device *pdev)
{
struct pcf50633_adc *adc = platform_get_drvdata(pdev);
int i, head;
pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY);
mutex_lock(&adc->queue_mutex);
head = adc->queue_head;
if (WARN_ON(adc->queue[head]))
dev_err(adc->pcf->dev,
"adc driver removed with request pending\n");
for (i = 0; i < PCF50633_MAX_ADC_FIFO_DEPTH; i++)
kfree(adc->queue[i]);
mutex_unlock(&adc->queue_mutex);
kfree(adc);
return 0;
}
static struct platform_driver pcf50633_adc_driver = {
.driver = {
.name = "pcf50633-adc",
},
.probe = pcf50633_adc_probe,
.remove = pcf50633_adc_remove,
};
module_platform_driver(pcf50633_adc_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 adc driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcf50633-adc");
| gpl-2.0 |
S4WRXTTCS/BeagleXM-Test | drivers/md/dm-raid1.c | 2829 | 35010 | /*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm-bio-record.h"
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
* Mirror set structures.
*---------------------------------------------------------------*/
enum dm_raid1_error {
DM_RAID1_WRITE_ERROR,
DM_RAID1_FLUSH_ERROR,
DM_RAID1_SYNC_ERROR,
DM_RAID1_READ_ERROR
};
struct mirror {
struct mirror_set *ms;
atomic_t error_count;
unsigned long error_type;
struct dm_dev *dev;
sector_t offset;
};
struct mirror_set {
struct dm_target *ti;
struct list_head list;
uint64_t features;
spinlock_t lock; /* protects the lists */
struct bio_list reads;
struct bio_list writes;
struct bio_list failures;
struct bio_list holds; /* bios are waiting until suspend */
struct dm_region_hash *rh;
struct dm_kcopyd_client *kcopyd_client;
struct dm_io_client *io_client;
mempool_t *read_record_pool;
/* recovery */
region_t nr_regions;
int in_sync;
int log_failure;
int leg_failure;
atomic_t suspend;
atomic_t default_mirror; /* Default mirror */
struct workqueue_struct *kmirrord_wq;
struct work_struct kmirrord_work;
struct timer_list timer;
unsigned long timer_pending;
struct work_struct trigger_event;
unsigned nr_mirrors;
struct mirror mirror[0];
};
static void wakeup_mirrord(void *context)
{
struct mirror_set *ms = context;
queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
static void delayed_wake_fn(unsigned long data)
{
struct mirror_set *ms = (struct mirror_set *) data;
clear_bit(0, &ms->timer_pending);
wakeup_mirrord(ms);
}
static void delayed_wake(struct mirror_set *ms)
{
if (test_and_set_bit(0, &ms->timer_pending))
return;
ms->timer.expires = jiffies + HZ / 5;
ms->timer.data = (unsigned long) ms;
ms->timer.function = delayed_wake_fn;
add_timer(&ms->timer);
}
static void wakeup_all_recovery_waiters(void *context)
{
wake_up_all(&_kmirrord_recovery_stopped);
}
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
unsigned long flags;
int should_wake = 0;
struct bio_list *bl;
bl = (rw == WRITE) ? &ms->writes : &ms->reads;
spin_lock_irqsave(&ms->lock, flags);
should_wake = !(bl->head);
bio_list_add(bl, bio);
spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
wakeup_mirrord(ms);
}
static void dispatch_bios(void *context, struct bio_list *bio_list)
{
struct mirror_set *ms = context;
struct bio *bio;
while ((bio = bio_list_pop(bio_list)))
queue_bio(ms, bio, WRITE);
}
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
struct mirror *m;
struct dm_bio_details details;
};
static struct kmem_cache *_dm_raid1_read_record_cache;
/*
* Every mirror should look like this one.
*/
#define DEFAULT_MIRROR 0
/*
* This is yucky. We squirrel the mirror struct away inside
* bi_next for read/write buffers. This is safe since the bh
* doesn't get submitted to the lower levels of block layer.
*/
static struct mirror *bio_get_m(struct bio *bio)
{
return (struct mirror *) bio->bi_next;
}
static void bio_set_m(struct bio *bio, struct mirror *m)
{
bio->bi_next = (struct bio *) m;
}
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
return &ms->mirror[atomic_read(&ms->default_mirror)];
}
static void set_default_mirror(struct mirror *m)
{
struct mirror_set *ms = m->ms;
struct mirror *m0 = &(ms->mirror[0]);
atomic_set(&ms->default_mirror, m - m0);
}
static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
struct mirror *m;
for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
if (!atomic_read(&m->error_count))
return m;
return NULL;
}
/* fail_mirror
* @m: mirror device to fail
* @error_type: one of the enum's, DM_RAID1_*_ERROR
*
* If errors are being handled, record the type of
* error encountered for this device. If this type
* of error has already been recorded, we can return;
* otherwise, we must signal userspace by triggering
* an event. Additionally, if the device is the
* primary device, we must choose a new primary, but
* only if the mirror is in-sync.
*
* This function must not block.
*/
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
struct mirror_set *ms = m->ms;
struct mirror *new;
ms->leg_failure = 1;
/*
* error_count is used for nothing more than a
* simple way to tell if a device has encountered
* errors.
*/
atomic_inc(&m->error_count);
if (test_and_set_bit(error_type, &m->error_type))
return;
if (!errors_handled(ms))
return;
if (m != get_default_mirror(ms))
goto out;
if (!ms->in_sync) {
/*
* Better to issue requests to same failing device
* than to risk returning corrupt data.
*/
DMERR("Primary mirror (%s) failed while out-of-sync: "
"Reads may fail.", m->dev->name);
goto out;
}
new = get_valid_mirror(ms);
if (new)
set_default_mirror(new);
else
DMWARN("All sides of mirror have failed.");
out:
schedule_work(&ms->trigger_event);
}
static int mirror_flush(struct dm_target *ti)
{
struct mirror_set *ms = ti->private;
unsigned long error_bits;
unsigned int i;
struct dm_io_region io[ms->nr_mirrors];
struct mirror *m;
struct dm_io_request io_req = {
.bi_rw = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
};
for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
io[i].bdev = m->dev->bdev;
io[i].sector = 0;
io[i].count = 0;
}
error_bits = -1;
dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
if (unlikely(error_bits != 0)) {
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error_bits))
fail_mirror(ms->mirror + i,
DM_RAID1_FLUSH_ERROR);
return -EIO;
}
return 0;
}
/*-----------------------------------------------------------------
* Recovery.
*
* When a mirror is first activated we may find that some regions
* are in the no-sync state. We have to recover these by
* recopying from the default mirror to all the others.
*---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
void *context)
{
struct dm_region *reg = context;
struct mirror_set *ms = dm_rh_region_context(reg);
int m, bit = 0;
if (read_err) {
/* Read error means the failure of default mirror. */
DMERR_LIMIT("Unable to read primary mirror during recovery");
fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
}
if (write_err) {
DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
write_err);
/*
* Bits correspond to devices (excluding default mirror).
* The default mirror cannot change during recovery.
*/
for (m = 0; m < ms->nr_mirrors; m++) {
if (&ms->mirror[m] == get_default_mirror(ms))
continue;
if (test_bit(bit, &write_err))
fail_mirror(ms->mirror + m,
DM_RAID1_SYNC_ERROR);
bit++;
}
}
dm_rh_recovery_end(reg, !(read_err || write_err));
}
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
int r;
unsigned i;
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
struct mirror *m;
unsigned long flags = 0;
region_t key = dm_rh_get_region_key(reg);
sector_t region_size = dm_rh_get_region_size(ms->rh);
/* fill in the source */
m = get_default_mirror(ms);
from.bdev = m->dev->bdev;
from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
if (key == (ms->nr_regions - 1)) {
/*
* The final region may be smaller than
* region_size.
*/
from.count = ms->ti->len & (region_size - 1);
if (!from.count)
from.count = region_size;
} else
from.count = region_size;
/* fill in the destinations */
for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
if (&ms->mirror[i] == get_default_mirror(ms))
continue;
m = ms->mirror + i;
dest->bdev = m->dev->bdev;
dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
dest->count = from.count;
dest++;
}
/* hand to kcopyd */
if (!errors_handled(ms))
set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
flags, recovery_complete, reg);
return r;
}
static void do_recovery(struct mirror_set *ms)
{
struct dm_region *reg;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
int r;
/*
* Start quiescing some regions.
*/
dm_rh_recovery_prepare(ms->rh);
/*
* Copy any already quiesced regions.
*/
while ((reg = dm_rh_recovery_start(ms->rh))) {
r = recover(ms, reg);
if (r)
dm_rh_recovery_end(reg, 0);
}
/*
* Update the in sync flag.
*/
if (!ms->in_sync &&
(log->type->get_sync_count(log) == ms->nr_regions)) {
/* the sync is complete */
dm_table_event(ms->ti->table);
ms->in_sync = 1;
}
}
/*-----------------------------------------------------------------
* Reads
*---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
struct mirror *m = get_default_mirror(ms);
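/* walk backwards from the default mirror, wrapping around, until a leg with no recorded errors is found */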
do {
if (likely(!atomic_read(&m->error_count)))
return m;
if (m-- == ms->mirror)
m += ms->nr_mirrors;
} while (m != get_default_mirror(ms));
return NULL;
}
static int default_ok(struct mirror *m)
{
struct mirror *default_mirror = get_default_mirror(m->ms);
return !atomic_read(&default_mirror->error_count);
}
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
region_t region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->in_sync(log, region, 0))
return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
return 0;
}
/*
* remap a buffer to a particular mirror.
*/
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
if (unlikely(!bio->bi_size))
return 0;
return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}
static void map_bio(struct mirror *m, struct bio *bio)
{
bio->bi_bdev = m->dev->bdev;
bio->bi_sector = map_sector(m, bio);
}
static void map_region(struct dm_io_region *io, struct mirror *m,
struct bio *bio)
{
io->bdev = m->dev->bdev;
io->sector = map_sector(m, bio);
io->count = bio->bi_size >> 9;
}
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
/*
* Lock is required to avoid race condition during suspend
* process.
*/
spin_lock_irq(&ms->lock);
if (atomic_read(&ms->suspend)) {
spin_unlock_irq(&ms->lock);
/*
* If device is suspended, complete the bio.
*/
if (dm_noflush_suspending(ms->ti))
bio_endio(bio, DM_ENDIO_REQUEUE);
else
bio_endio(bio, -EIO);
return;
}
/*
* Hold bio until the suspend is complete.
*/
bio_list_add(&ms->holds, bio);
spin_unlock_irq(&ms->lock);
}
/*-----------------------------------------------------------------
* Reads
*---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
struct bio *bio = context;
struct mirror *m;
m = bio_get_m(bio);
bio_set_m(bio, NULL);
if (likely(!error)) {
bio_endio(bio, 0);
return;
}
fail_mirror(m, DM_RAID1_READ_ERROR);
if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
DMWARN_LIMIT("Read failure on mirror device %s. "
"Trying alternative device.",
m->dev->name);
queue_bio(m->ms, bio, bio_rw(bio));
return;
}
DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
m->dev->name);
bio_endio(bio, -EIO);
}
/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
struct dm_io_region io;
struct dm_io_request io_req = {
.bi_rw = READ,
.mem.type = DM_IO_BVEC,
.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
.notify.fn = read_callback,
.notify.context = bio,
.client = m->ms->io_client,
};
map_region(&io, m, bio);
bio_set_m(bio, m);
BUG_ON(dm_io(&io_req, 1, &io, NULL));
}
static inline int region_in_sync(struct mirror_set *ms, region_t region,
int may_block)
{
int state = dm_rh_get_state(ms->rh, region, may_block);
return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
region_t region;
struct bio *bio;
struct mirror *m;
while ((bio = bio_list_pop(reads))) {
region = dm_rh_bio_to_region(ms->rh, bio);
m = get_default_mirror(ms);
/*
* We can only read balance if the region is in sync.
*/
if (likely(region_in_sync(ms, region, 1)))
m = choose_mirror(ms, bio->bi_sector);
else if (m && atomic_read(&m->error_count))
m = NULL;
if (likely(m))
read_async_bio(m, bio);
else
bio_endio(bio, -EIO);
}
}
/*-----------------------------------------------------------------
* Writes.
*
* We do different things with the write io depending on the
* state of the region that it's in:
*
* SYNC: increment pending, use kcopyd to write to *all* mirrors
* RECOVERING: delay the io until recovery completes
* NOSYNC: increment pending, just write to the default mirror
*---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
unsigned i, ret = 0;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
int should_wake = 0;
unsigned long flags;
ms = bio_get_m(bio)->ms;
bio_set_m(bio, NULL);
/*
* NOTE: We don't decrement the pending count here,
* instead it is done by the targets endio function.
* This way we handle both writes to SYNC and NOSYNC
* regions with the same code.
*/
if (likely(!error)) {
bio_endio(bio, ret);
return;
}
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error))
fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
/*
* Need to raise event. Since raising
* events can block, we need to do it in
* the main thread.
*/
spin_lock_irqsave(&ms->lock, flags);
if (!ms->failures.head)
should_wake = 1;
bio_list_add(&ms->failures, bio);
spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
wakeup_mirrord(ms);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
{
unsigned int i;
struct dm_io_region io[ms->nr_mirrors], *dest = io;
struct mirror *m;
struct dm_io_request io_req = {
.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
.mem.type = DM_IO_BVEC,
.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
.notify.fn = write_callback,
.notify.context = bio,
.client = ms->io_client,
};
if (bio->bi_rw & REQ_DISCARD) {
io_req.bi_rw |= REQ_DISCARD;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = NULL;
}
for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
map_region(dest++, m, bio);
/*
* Use default mirror because we only need it to retrieve the reference
* to the mirror set in write_callback().
*/
bio_set_m(bio, get_default_mirror(ms));
BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
int state;
struct bio *bio;
struct bio_list sync, nosync, recover, *this_list = NULL;
struct bio_list requeue;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
region_t region;
if (!writes->head)
return;
/*
* Classify each write.
*/
bio_list_init(&sync);
bio_list_init(&nosync);
bio_list_init(&recover);
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
if ((bio->bi_rw & REQ_FLUSH) ||
(bio->bi_rw & REQ_DISCARD)) {
bio_list_add(&sync, bio);
continue;
}
region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->is_remote_recovering &&
log->type->is_remote_recovering(log, region)) {
bio_list_add(&requeue, bio);
continue;
}
state = dm_rh_get_state(ms->rh, region, 1);
switch (state) {
case DM_RH_CLEAN:
case DM_RH_DIRTY:
this_list = &sync;
break;
case DM_RH_NOSYNC:
this_list = &nosync;
break;
case DM_RH_RECOVERING:
this_list = &recover;
break;
}
bio_list_add(this_list, bio);
}
/*
* Add bios that are delayed due to remote recovery
* back on to the write queue
*/
if (unlikely(requeue.head)) {
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->writes, &requeue);
spin_unlock_irq(&ms->lock);
delayed_wake(ms);
}
/*
* Increment the pending counts for any regions that will
* be written to (writes to recover regions are going to
* be delayed).
*/
dm_rh_inc_pending(ms->rh, &sync);
dm_rh_inc_pending(ms->rh, &nosync);
/*
* If the flush fails on a previous call and succeeds here,
* we must not reset the log_failure variable. We need
* userspace interaction to do that.
*/
ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
/*
* Dispatch io.
*/
if (unlikely(ms->log_failure) && errors_handled(ms)) {
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->failures, &sync);
spin_unlock_irq(&ms->lock);
wakeup_mirrord(ms);
} else
while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
while ((bio = bio_list_pop(&recover)))
dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
if (unlikely(ms->leg_failure) && errors_handled(ms)) {
spin_lock_irq(&ms->lock);
bio_list_add(&ms->failures, bio);
spin_unlock_irq(&ms->lock);
wakeup_mirrord(ms);
} else {
map_bio(get_default_mirror(ms), bio);
generic_make_request(bio);
}
}
}
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
struct bio *bio;
if (likely(!failures->head))
return;
/*
* If the log has failed, unattempted writes are being
* put on the holds list. We can't issue those writes
* until a log has been marked, so we must store them.
*
* If a 'noflush' suspend is in progress, we can requeue
* the I/O's to the core. This gives userspace a chance
* to reconfigure the mirror, at which point the core
* will reissue the writes. If the 'noflush' flag is
* not set, we have no choice but to return errors.
*
* Some writes on the failures list may have been
* submitted before the log failure and represent a
* failure to write to one of the devices. It is ok
* for us to treat them the same and requeue them
* as well.
*/
while ((bio = bio_list_pop(failures))) {
if (!ms->log_failure) {
ms->in_sync = 0;
dm_rh_mark_nosync(ms->rh, bio);
}
/*
* If all the legs are dead, fail the I/O.
* If we have been told to handle errors, hold the bio
* and wait for userspace to deal with the problem.
* Otherwise pretend that the I/O succeeded. (This would
* be wrong if the failed leg returned after reboot and
* got replicated back to the good legs.)
*/
if (!get_valid_mirror(ms))
bio_endio(bio, -EIO);
else if (errors_handled(ms))
hold_bio(ms, bio);
else
bio_endio(bio, 0);
}
}
static void trigger_event(struct work_struct *work)
{
struct mirror_set *ms =
container_of(work, struct mirror_set, trigger_event);
dm_table_event(ms->ti->table);
}
/*-----------------------------------------------------------------
* kmirrord
*---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
struct mirror_set *ms = container_of(work, struct mirror_set,
kmirrord_work);
struct bio_list reads, writes, failures;
unsigned long flags;
spin_lock_irqsave(&ms->lock, flags);
reads = ms->reads;
writes = ms->writes;
failures = ms->failures;
bio_list_init(&ms->reads);
bio_list_init(&ms->writes);
bio_list_init(&ms->failures);
spin_unlock_irqrestore(&ms->lock, flags);
dm_rh_update_states(ms->rh, errors_handled(ms));
do_recovery(ms);
do_reads(ms, &reads);
do_writes(ms, &writes);
do_failures(ms, &failures);
}
/*-----------------------------------------------------------------
* Target functions
*---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
uint32_t region_size,
struct dm_target *ti,
struct dm_dirty_log *dl)
{
size_t len;
struct mirror_set *ms = NULL;
len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
ms = kzalloc(len, GFP_KERNEL);
if (!ms) {
ti->error = "Cannot allocate mirror context";
return NULL;
}
spin_lock_init(&ms->lock);
bio_list_init(&ms->reads);
bio_list_init(&ms->writes);
bio_list_init(&ms->failures);
bio_list_init(&ms->holds);
ms->ti = ti;
ms->nr_mirrors = nr_mirrors;
ms->nr_regions = dm_sector_div_up(ti->len, region_size);
ms->in_sync = 0;
ms->log_failure = 0;
ms->leg_failure = 0;
atomic_set(&ms->suspend, 0);
atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
_dm_raid1_read_record_cache);
if (!ms->read_record_pool) {
ti->error = "Error creating mirror read_record_pool";
kfree(ms);
return NULL;
}
ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
wakeup_all_recovery_waiters,
ms->ti->begin, MAX_RECOVERY,
dl, region_size, ms->nr_regions);
if (IS_ERR(ms->rh)) {
ti->error = "Error creating dirty region hash";
dm_io_client_destroy(ms->io_client);
mempool_destroy(ms->read_record_pool);
kfree(ms);
return NULL;
}
return ms;
}
static void free_context(struct mirror_set *ms, struct dm_target *ti,
unsigned int m)
{
while (m--)
dm_put_device(ti, ms->mirror[m].dev);
dm_io_client_destroy(ms->io_client);
dm_region_hash_destroy(ms->rh);
mempool_destroy(ms->read_record_pool);
kfree(ms);
}
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
unsigned int mirror, char **argv)
{
unsigned long long offset;
char dummy;
if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
ti->error = "Invalid offset";
return -EINVAL;
}
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&ms->mirror[mirror].dev)) {
ti->error = "Device lookup failure";
return -ENXIO;
}
ms->mirror[mirror].ms = ms;
atomic_set(&(ms->mirror[mirror].error_count), 0);
ms->mirror[mirror].error_type = 0;
ms->mirror[mirror].offset = offset;
return 0;
}
/*
* Create dirty log: log_type #log_params <log_params>
*/
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
unsigned argc, char **argv,
unsigned *args_used)
{
unsigned param_count;
struct dm_dirty_log *dl;
char dummy;
if (argc < 2) {
ti->error = "Insufficient mirror log arguments";
return NULL;
}
if (sscanf(argv[1], "%u%c", ¶m_count, &dummy) != 1) {
ti->error = "Invalid mirror log argument count";
return NULL;
}
*args_used = 2 + param_count;
if (argc < *args_used) {
ti->error = "Insufficient mirror log arguments";
return NULL;
}
dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
argv + 2);
if (!dl) {
ti->error = "Error creating mirror dirty log";
return NULL;
}
return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
unsigned *args_used)
{
unsigned num_features;
struct dm_target *ti = ms->ti;
char dummy;
*args_used = 0;
if (!argc)
return 0;
if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
ti->error = "Invalid number of features";
return -EINVAL;
}
argc--;
argv++;
(*args_used)++;
if (num_features > argc) {
ti->error = "Not enough arguments to support feature count";
return -EINVAL;
}
if (!strcmp("handle_errors", argv[0]))
ms->features |= DM_RAID1_HANDLE_ERRORS;
else {
ti->error = "Unrecognised feature requested";
return -EINVAL;
}
(*args_used)++;
return 0;
}
/*
* Construct a mirror mapping:
*
* log_type #log_params <log_params>
* #mirrors [mirror_path offset]{2,}
* [#features <features>]
*
* log_type is "core" or "disk"
* #log_params is between 1 and 3
*
* If present, features must be "handle_errors".
*/
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
unsigned int nr_mirrors, m, args_used;
struct mirror_set *ms;
struct dm_dirty_log *dl;
char dummy;
dl = create_dirty_log(ti, argc, argv, &args_used);
if (!dl)
return -EINVAL;
argv += args_used;
argc -= args_used;
if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
ti->error = "Invalid number of mirrors";
dm_dirty_log_destroy(dl);
return -EINVAL;
}
argv++, argc--;
if (argc < nr_mirrors * 2) {
ti->error = "Too few mirror arguments";
dm_dirty_log_destroy(dl);
return -EINVAL;
}
ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
if (!ms) {
dm_dirty_log_destroy(dl);
return -ENOMEM;
}
/* Get the mirror parameter sets */
for (m = 0; m < nr_mirrors; m++) {
r = get_mirror(ms, ti, m, argv);
if (r) {
free_context(ms, ti, m);
return r;
}
argv += 2;
argc -= 2;
}
ti->private = ms;
ti->split_io = dm_rh_get_region_size(ms->rh);
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
ms->kmirrord_wq = alloc_workqueue("kmirrord",
WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
if (!ms->kmirrord_wq) {
DMERR("couldn't start kmirrord");
r = -ENOMEM;
goto err_free_context;
}
INIT_WORK(&ms->kmirrord_work, do_mirror);
init_timer(&ms->timer);
ms->timer_pending = 0;
INIT_WORK(&ms->trigger_event, trigger_event);
r = parse_features(ms, argc, argv, &args_used);
if (r)
goto err_destroy_wq;
argv += args_used;
argc -= args_used;
/*
* Any read-balancing addition depends on the
* DM_RAID1_HANDLE_ERRORS flag being present.
* This is because the decision to balance depends
* on the sync state of a region. If the above
* flag is not present, we ignore errors; and
* the sync state may be inaccurate.
*/
if (argc) {
ti->error = "Too many mirror arguments";
r = -EINVAL;
goto err_destroy_wq;
}
ms->kcopyd_client = dm_kcopyd_client_create();
if (IS_ERR(ms->kcopyd_client)) {
r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
}
wakeup_mirrord(ms);
return 0;
err_destroy_wq:
destroy_workqueue(ms->kmirrord_wq);
err_free_context:
free_context(ms, ti, ms->nr_mirrors);
return r;
}
static void mirror_dtr(struct dm_target *ti)
{
struct mirror_set *ms = (struct mirror_set *) ti->private;
del_timer_sync(&ms->timer);
flush_workqueue(ms->kmirrord_wq);
flush_work_sync(&ms->trigger_event);
dm_kcopyd_client_destroy(ms->kcopyd_client);
destroy_workqueue(ms->kmirrord_wq);
free_context(ms, ti, ms->nr_mirrors);
}
/*
* Mirror mapping function
*/
static int mirror_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
int r, rw = bio_rw(bio);
struct mirror *m;
struct mirror_set *ms = ti->private;
struct dm_raid1_read_record *read_record = NULL;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
queue_bio(ms, bio, rw);
return DM_MAPIO_SUBMITTED;
}
r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
if (r < 0 && r != -EWOULDBLOCK)
return r;
/*
* If region is not in-sync queue the bio.
*/
if (!r || (r == -EWOULDBLOCK)) {
if (rw == READA)
return -EWOULDBLOCK;
queue_bio(ms, bio, rw);
return DM_MAPIO_SUBMITTED;
}
/*
* The region is in-sync and we can perform reads directly.
* Store enough information so we can retry if it fails.
*/
m = choose_mirror(ms, bio->bi_sector);
if (unlikely(!m))
return -EIO;
read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
if (likely(read_record)) {
dm_bio_record(&read_record->details, bio);
map_context->ptr = read_record;
read_record->m = m;
}
map_bio(m, bio);
return DM_MAPIO_REMAPPED;
}
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
int error, union map_info *map_context)
{
int rw = bio_rw(bio);
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct mirror *m = NULL;
struct dm_bio_details *bd = NULL;
struct dm_raid1_read_record *read_record = map_context->ptr;
/*
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
if (!(bio->bi_rw & REQ_FLUSH))
dm_rh_dec(ms->rh, map_context->ll);
return error;
}
if (error == -EOPNOTSUPP)
goto out;
if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
goto out;
if (unlikely(error)) {
if (!read_record) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
* mirror in-sync.
*/
DMERR_LIMIT("Mirror read failed.");
return -EIO;
}
m = read_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
m->dev->name);
fail_mirror(m, DM_RAID1_READ_ERROR);
/*
* A failed read is requeued for another attempt using an intact
* mirror.
*/
if (default_ok(m) || mirror_available(ms, bio)) {
bd = &read_record->details;
dm_bio_restore(bd, bio);
mempool_free(read_record, ms->read_record_pool);
map_context->ptr = NULL;
queue_bio(ms, bio, rw);
return 1;
}
DMERR("All replicated volumes dead, failing I/O");
}
out:
if (read_record) {
mempool_free(read_record, ms->read_record_pool);
map_context->ptr = NULL;
}
return error;
}
static void mirror_presuspend(struct dm_target *ti)
{
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
struct bio_list holds;
struct bio *bio;
atomic_set(&ms->suspend, 1);
/*
* Drain the hold list: with ms->suspend set, hold_bio() will
* complete or requeue each bio, and no new bio can be added
* to the hold list afterwards.
*/
spin_lock_irq(&ms->lock);
holds = ms->holds;
bio_list_init(&ms->holds);
spin_unlock_irq(&ms->lock);
while ((bio = bio_list_pop(&holds)))
hold_bio(ms, bio);
/*
* We must finish up all the work that we've
* generated (i.e. recovery work).
*/
dm_rh_stop_recovery(ms->rh);
wait_event(_kmirrord_recovery_stopped,
!dm_rh_recovery_in_flight(ms->rh));
if (log->type->presuspend && log->type->presuspend(log))
/* FIXME: need better error handling */
DMWARN("log presuspend failed");
/*
* Now that recovery is complete/stopped and the
* delayed bios are queued, we need to wait for
* the worker thread to complete. This way,
* we know that all of our I/O has been pushed.
*/
flush_workqueue(ms->kmirrord_wq);
}
static void mirror_postsuspend(struct dm_target *ti)
{
struct mirror_set *ms = ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
if (log->type->postsuspend && log->type->postsuspend(log))
/* FIXME: need better error handling */
DMWARN("log postsuspend failed");
}
static void mirror_resume(struct dm_target *ti)
{
struct mirror_set *ms = ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
atomic_set(&ms->suspend, 0);
if (log->type->resume && log->type->resume(log))
/* FIXME: need better error handling */
DMWARN("log resume failed");
dm_rh_start_recovery(ms->rh);
}
/*
* device_status_char
* @m: mirror device/leg we want the status of
*
* We return one character representing the most severe error
* we have encountered.
* A => Alive - No failures
* D => Dead - A write failure occurred leaving mirror out-of-sync
* S => Sync - A synchronization failure occurred, mirror out-of-sync
* R => Read - A read failure occurred, mirror data unaffected
*
* Returns: <char>
*/
static char device_status_char(struct mirror *m)
{
if (!atomic_read(&(m->error_count)))
return 'A';
return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
static int mirror_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
char buffer[ms->nr_mirrors + 1];
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%d ", ms->nr_mirrors);
for (m = 0; m < ms->nr_mirrors; m++) {
DMEMIT("%s ", ms->mirror[m].dev->name);
buffer[m] = device_status_char(&(ms->mirror[m]));
}
buffer[m] = '\0';
DMEMIT("%llu/%llu 1 %s ",
(unsigned long long)log->type->get_sync_count(log),
(unsigned long long)ms->nr_regions, buffer);
sz += log->type->status(log, type, result+sz, maxlen-sz);
break;
case STATUSTYPE_TABLE:
sz = log->type->status(log, type, result, maxlen);
DMEMIT("%d", ms->nr_mirrors);
for (m = 0; m < ms->nr_mirrors; m++)
DMEMIT(" %s %llu", ms->mirror[m].dev->name,
(unsigned long long)ms->mirror[m].offset);
if (ms->features & DM_RAID1_HANDLE_ERRORS)
DMEMIT(" 1 handle_errors");
}
return 0;
}
static int mirror_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct mirror_set *ms = ti->private;
int ret = 0;
unsigned i;
for (i = 0; !ret && i < ms->nr_mirrors; i++)
ret = fn(ti, ms->mirror[i].dev,
ms->mirror[i].offset, ti->len, data);
return ret;
}
static struct target_type mirror_target = {
.name = "mirror",
.version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
.map = mirror_map,
.end_io = mirror_end_io,
.presuspend = mirror_presuspend,
.postsuspend = mirror_postsuspend,
.resume = mirror_resume,
.status = mirror_status,
.iterate_devices = mirror_iterate_devices,
};
static int __init dm_mirror_init(void)
{
int r;
_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
if (!_dm_raid1_read_record_cache) {
DMERR("Can't allocate dm_raid1_read_record cache");
r = -ENOMEM;
goto bad_cache;
}
r = dm_register_target(&mirror_target);
if (r < 0) {
DMERR("Failed to register mirror target");
goto bad_target;
}
return 0;
bad_target:
kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
return r;
}
static void __exit dm_mirror_exit(void)
{
dm_unregister_target(&mirror_target);
kmem_cache_destroy(_dm_raid1_read_record_cache);
}
/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);
MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
| gpl-2.0 |