repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
flar2/m8-GPE | drivers/media/rc/keymaps/rc-pctv-sedna.c | 370 | 1759 | /* pctv-sedna.h - Keytable for pctv_sedna Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Scancode -> Linux keycode table for the PCTV Sedna remote control. */
static struct rc_map_table pctv_sedna[] = {
/* Digit keys */
{ 0x00, KEY_0 },
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
{ 0x03, KEY_3 },
{ 0x04, KEY_4 },
{ 0x05, KEY_5 },
{ 0x06, KEY_6 },
{ 0x07, KEY_7 },
{ 0x08, KEY_8 },
{ 0x09, KEY_9 },
/* Playback / tuner control keys */
{ 0x0a, KEY_AGAIN },
{ 0x0b, KEY_CHANNELUP },
{ 0x0c, KEY_VOLUMEUP },
{ 0x0d, KEY_MODE },
{ 0x0e, KEY_STOP },
{ 0x0f, KEY_PREVIOUSSONG },
{ 0x10, KEY_ZOOM },
{ 0x11, KEY_VIDEO },
{ 0x12, KEY_POWER },
{ 0x13, KEY_MUTE },
{ 0x15, KEY_CHANNELDOWN },
{ 0x18, KEY_VOLUMEDOWN },
{ 0x19, KEY_CAMERA },
{ 0x1a, KEY_NEXTSONG },
{ 0x1b, KEY_TIME },
{ 0x1c, KEY_RADIO },
{ 0x1d, KEY_RECORD },
{ 0x1e, KEY_PAUSE },
/* These entries are listed out of scancode order in the original table */
{ 0x14, KEY_INFO },
{ 0x16, KEY_OK },
{ 0x17, KEY_DIGITS },
{ 0x1f, KEY_PLAY },
};
/*
 * rc-core map descriptor: bundles the scancode table above with its
 * element count, protocol type and the well-known map name so it can be
 * registered with rc_map_register().
 */
static struct rc_map_list pctv_sedna_map = {
.map = {
.scan = pctv_sedna,
.size = ARRAY_SIZE(pctv_sedna),
.rc_type = RC_TYPE_UNKNOWN,
.name = RC_MAP_PCTV_SEDNA,
}
};
/* Module load hook: register the Sedna keymap with the rc-core registry. */
static int __init init_rc_map_pctv_sedna(void)
{
	int rc;

	rc = rc_map_register(&pctv_sedna_map);
	return rc;
}
/* Module unload hook: remove the Sedna keymap from the rc-core registry. */
static void __exit exit_rc_map_pctv_sedna(void)
{
rc_map_unregister(&pctv_sedna_map);
}
/* Wire the load/unload hooks and declare module metadata. */
module_init(init_rc_map_pctv_sedna)
module_exit(exit_rc_map_pctv_sedna)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
jpirko/rocker_bpf | arch/arm/mach-omap2/pm24xx.c | 882 | 8162 | /*
* OMAP2 Power Management Routines
*
* Copyright (C) 2005 Texas Instruments, Inc.
* Copyright (C) 2006-2008 Nokia Corporation
*
* Written by:
* Richard Woodruff <r-woodruff2@ti.com>
* Tony Lindgren
* Juha Yrjola
* Amit Kucheria <amit.kucheria@nokia.com>
* Igor Stoppa <igor.stoppa@nokia.com>
*
* Based on pm.c for omap1
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
#include <asm/fncpy.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>
#include <asm/system_misc.h>
#include <linux/omap-dma.h>
#include "soc.h"
#include "common.h"
#include "clock.h"
#include "prm2xxx.h"
#include "prm-regbits-24xx.h"
#include "cm2xxx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "sram.h"
#include "pm.h"
#include "control.h"
#include "powerdomain.h"
#include "clockdomain.h"
/* SRAM-resident suspend routine; populated in omap2_pm_init() via
 * omap_sram_push() so the MPU can execute it while SDRAM is idled. */
static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl,
void __iomem *sdrc_power);
/* Power/clock domains and clocks looked up once during omap2_pm_init(). */
static struct powerdomain *mpu_pwrdm, *core_pwrdm;
static struct clockdomain *dsp_clkdm, *mpu_clkdm, *wkup_clkdm, *gfx_clkdm;
static struct clk *osc_ck, *emul_ck;
/*
 * omap2_enter_full_retention - put both CORE and MPU powerdomains into
 * retention and run the SRAM suspend routine.  On wakeup (or if an IRQ
 * is already pending) the wake-event status bits are cleared and both
 * domains are programmed back to ON.  Always returns 0.
 */
static int omap2_enter_full_retention(void)
{
u32 l;
/* There is 1 reference hold for all children of the oscillator
* clock, the following will remove it. If no one else uses the
* oscillator itself it will be disabled if/when we enter retention
* mode.
*/
clk_disable(osc_ck);
/* Clear old wake-up events */
/* REVISIT: These write to reserved bits? */
omap_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
omap_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
omap_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, ~0);
pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
/* Workaround to kill USB */
l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
omap2_gpio_prepare_for_idle(0);
/* One last check for pending IRQs to avoid extra latency due
* to sleeping unnecessarily. */
if (omap_irq_pending())
goto no_sleep;
/* Jump to SRAM suspend code */
omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_POWER));
no_sleep:
omap2_gpio_resume_after_idle();
clk_enable(osc_ck);
/* clear CORE wake-up events */
omap_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
omap_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
omap_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, 0x4 | 0x1);
/* MPU domain wake events */
omap_prm_clear_mod_irqs(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET, 0x1);
omap_prm_clear_mod_irqs(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET, 0x20);
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_ON);
return 0;
}
/* Non-zero when the STI console is in use; blocks MPU retention. */
static int sti_console_enabled;

/*
 * omap2_allow_mpu_retention - decide whether the MPU may enter retention.
 * Returns 1 when the clock manager permits it and the STI console is not
 * enabled, 0 otherwise.
 */
static int omap2_allow_mpu_retention(void)
{
	return omap2xxx_cm_mpu_retention_allowed() && !sti_console_enabled;
}
/*
 * omap2_enter_mpu_retention - program the MPU powerdomain target state
 * (retention if permitted, otherwise ON), execute WFI via the CP15
 * wait-for-interrupt operation, then restore the target state to ON.
 */
static void omap2_enter_mpu_retention(void)
{
const int zero = 0;
/* The peripherals seem not to be able to wake up the MPU when
* it is in retention mode. */
if (omap2_allow_mpu_retention()) {
/* REVISIT: These write to reserved bits? */
omap_prm_clear_mod_irqs(CORE_MOD, PM_WKST1, ~0);
omap_prm_clear_mod_irqs(CORE_MOD, OMAP24XX_PM_WKST2, ~0);
omap_prm_clear_mod_irqs(WKUP_MOD, PM_WKST, ~0);
/* Try to enter MPU retention */
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
} else {
/* Block MPU retention */
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
}
/* WFI */
asm("mcr p15, 0, %0, c7, c0, 4" : : "r" (zero) : "memory", "cc");
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
}
/*
 * omap2_can_sleep - check whether full-chip retention is possible.
 * Sleep is ruled out when functional clocks are still active, the
 * oscillator clock is enabled, or a DMA transfer is in flight.
 * Returns 1 when sleep is allowed, 0 otherwise.
 */
static int omap2_can_sleep(void)
{
	if (omap2xxx_cm_fclks_active() ||
	    __clk_is_enabled(osc_ck) ||
	    omap_dma_running())
		return 0;

	return 1;
}
/*
 * omap2_pm_idle - arch idle hook: pick the deepest idle state available.
 * Full-chip retention when omap2_can_sleep() allows it, otherwise MPU
 * retention; bail out entirely if an interrupt is already pending.
 */
static void omap2_pm_idle(void)
{
	int full_retention = omap2_can_sleep();

	/* Avoid the latency of entering a low-power state for nothing */
	if (omap_irq_pending())
		return;

	if (full_retention)
		omap2_enter_full_retention();
	else
		omap2_enter_mpu_retention();
}
/*
 * prcm_setup_regs - one-time PRCM programming for PM: enable autoidle,
 * configure CORE/MPU retention states, force DSP/GFX off, enable
 * hardware-supervised idle, and set up wake events and clock/voltage
 * setup timings.  Called once from omap2_pm_init().
 */
static void __init prcm_setup_regs(void)
{
int i, num_mem_banks;
struct powerdomain *pwrdm;
/*
* Enable autoidle
* XXX This should be handled by hwmod code or PRCM init code
*/
omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
OMAP2_PRCM_SYSCONFIG_OFFSET);
/*
* Set CORE powerdomain memory banks to retain their contents
* during RETENTION
*/
num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
for (i = 0; i < num_mem_banks; i++)
pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);
pwrdm_set_logic_retst(core_pwrdm, PWRDM_POWER_RET);
pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
/* Force-power down DSP, GFX powerdomains */
pwrdm = clkdm_get_pwrdm(dsp_clkdm);
pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
pwrdm = clkdm_get_pwrdm(gfx_clkdm);
pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
/* Enable hardware-supervised idle for all clkdms */
clkdm_for_each(omap_pm_clkdms_setup, NULL);
/* MPU wakes up through the WKUP domain; add the dependency */
clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);
omap_common_suspend_init(omap2_enter_full_retention);
/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
* stabilisation */
omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
OMAP2_PRCM_CLKSSETUP_OFFSET);
/* Configure automatic voltage transition */
omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
OMAP2_PRCM_VOLTSETUP_OFFSET);
omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
OMAP24XX_MEMRETCTRL_MASK |
(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
/* Enable wake-up events */
omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
WKUP_MOD, PM_WKEN);
/* Enable SYS_CLKEN control when all domains idle */
omap2_prm_set_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK, OMAP24XX_GR_MOD,
OMAP2_PRCM_CLKSRC_CTRL_OFFSET);
}
/*
 * omap2_pm_init - initialise OMAP2 power management.
 * Looks up the power/clock domains used above, grabs the oscillator
 * (and, on 242x, emulation) clocks, programs the PRCM, copies the
 * suspend routine to SRAM, and installs the idle hook.
 * Returns 0 on success or -ENODEV if a required clock is missing.
 */
int __init omap2_pm_init(void)
{
u32 l;
printk(KERN_INFO "Power Management for OMAP2 initializing\n");
l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
/* Look up important powerdomains */
mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
if (!mpu_pwrdm)
pr_err("PM: mpu_pwrdm not found\n");
core_pwrdm = pwrdm_lookup("core_pwrdm");
if (!core_pwrdm)
pr_err("PM: core_pwrdm not found\n");
/* Look up important clockdomains */
mpu_clkdm = clkdm_lookup("mpu_clkdm");
if (!mpu_clkdm)
pr_err("PM: mpu_clkdm not found\n");
wkup_clkdm = clkdm_lookup("wkup_clkdm");
if (!wkup_clkdm)
pr_err("PM: wkup_clkdm not found\n");
dsp_clkdm = clkdm_lookup("dsp_clkdm");
if (!dsp_clkdm)
pr_err("PM: dsp_clkdm not found\n");
gfx_clkdm = clkdm_lookup("gfx_clkdm");
if (!gfx_clkdm)
pr_err("PM: gfx_clkdm not found\n");
osc_ck = clk_get(NULL, "osc_ck");
if (IS_ERR(osc_ck)) {
printk(KERN_ERR "could not get osc_ck\n");
return -ENODEV;
}
/* emul_ck only exists on OMAP242x parts */
if (cpu_is_omap242x()) {
emul_ck = clk_get(NULL, "emul_ck");
if (IS_ERR(emul_ck)) {
printk(KERN_ERR "could not get emul_ck\n");
/* Release osc_ck taken above before bailing out */
clk_put(osc_ck);
return -ENODEV;
}
}
prcm_setup_regs();
/*
* We copy the assembler sleep/wakeup routines to SRAM.
* These routines need to be in SRAM as that's the only
* memory the MPU can see when it wakes up after the entire
* chip enters idle.
*/
omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
omap24xx_cpu_suspend_sz);
arm_pm_idle = omap2_pm_idle;
return 0;
}
| gpl-2.0 |
bestmjh47/Ultimate-Kernel-EF34K | drivers/staging/gobi/QCUSBNet2k/QMIDevice.c | 1138 | 82313 | /*===========================================================================
FILE:
QMIDevice.c
DESCRIPTION:
Functions related to the QMI interface device
FUNCTIONS:
Generic functions
IsDeviceValid
PrintHex
QSetDownReason
QClearDownReason
QTestDownReason
Driver level asynchronous read functions
ReadCallback
IntCallback
StartRead
KillRead
Internal read/write functions
ReadAsync
UpSem
ReadSync
WriteSyncCallback
WriteSync
Internal memory management functions
GetClientID
ReleaseClientID
FindClientMem
AddToReadMemList
PopFromReadMemList
AddToNotifyList
NotifyAndPopNotifyList
AddToURBList
PopFromURBList
Userspace wrappers
UserspaceOpen
UserspaceIOCTL
UserspaceClose
UserspaceRead
UserspaceWrite
Initializer and destructor
RegisterQMIDevice
DeregisterQMIDevice
Driver level client management
QMIReady
QMIWDSCallback
SetupQMIWDSCallback
QMIDMSGetMEID
Copyright (c) 2010, Code Aurora Forum. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 and
only version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
===========================================================================*/
//---------------------------------------------------------------------------
// Include Files
//---------------------------------------------------------------------------
#include "QMIDevice.h"
//-----------------------------------------------------------------------------
// Definitions
//-----------------------------------------------------------------------------
// Module-wide debug flag (defined in the driver core)
extern int debug;

// Prototype to QCSuspend function
int QCSuspend(
   struct usb_interface * pIntf,
   pm_message_t powerEvent );

// IOCTL to generate a client ID for this service type
// (expression macros are parenthesized so they expand safely in any
// surrounding expression; the values are unchanged)
#define IOCTL_QMI_GET_SERVICE_FILE (0x8BE0 + 1)

// IOCTL to get the VIDPID of the device
#define IOCTL_QMI_GET_DEVICE_VIDPID (0x8BE0 + 2)

// IOCTL to get the MEID of the device
#define IOCTL_QMI_GET_DEVICE_MEID (0x8BE0 + 3)

// CDC GET_ENCAPSULATED_RESPONSE packet
#define CDC_GET_ENCAPSULATED_RESPONSE 0x01A1ll

// CDC CONNECTION_SPEED_CHANGE indication packet
#define CDC_CONNECTION_SPEED_CHANGE 0x08000000002AA1ll
/*=========================================================================*/
// UserspaceQMIFops
// QMI device's userspace file operations
/*=========================================================================*/
/*
 * File operations for the userspace QMI character device.
 * NOTE(review): .ioctl is the legacy (pre-unlocked_ioctl) entry point,
 * and cleanup is hooked on .flush rather than .release — confirm this
 * matches the target kernel version's VFS expectations.
 */
struct file_operations UserspaceQMIFops =
{
.owner = THIS_MODULE,
.read = UserspaceRead,
.write = UserspaceWrite,
.ioctl = UserspaceIOCTL,
.open = UserspaceOpen,
.flush = UserspaceClose,
};
/*=========================================================================*/
// Generic functions
/*=========================================================================*/
/*===========================================================================
METHOD:
IsDeviceValid (Public Method)
DESCRIPTION:
Basic test to see if device memory is valid
PARAMETERS:
pDev [ I ] - Device specific memory
RETURN VALUE:
bool
===========================================================================*/
/*
 * IsDeviceValid - basic sanity check on device memory.
 * The device is usable only when the pointer is non-NULL and the QMI
 * layer has been marked valid.
 */
bool IsDeviceValid( sQCUSBNet * pDev )
{
   return (pDev != NULL) && (pDev->mbQMIValid != false);
}
/*===========================================================================
METHOD:
PrintHex (Public Method)
DESCRIPTION:
Print Hex data, for debug purposes
PARAMETERS:
pBuffer [ I ] - Data buffer
bufSize [ I ] - Size of data buffer
RETURN VALUE:
None
===========================================================================*/
/*
 * PrintHex - dump a buffer as space-separated hex bytes via DBG().
 * pBuffer [ I ] - data to dump; bufSize [ I ] - number of bytes.
 */
void PrintHex(
   void * pBuffer,
   u16 bufSize )
{
   char * pPrintBuf;
   u16 pos;
   int status;

   // Three characters per byte ("XX ") plus NUL terminator
   pPrintBuf = kmalloc( bufSize * 3 + 1, GFP_ATOMIC );
   if (pPrintBuf == NULL)
   {
      DBG( "Unable to allocate buffer\n" );
      return;
   }
   memset( pPrintBuf, 0 , bufSize * 3 + 1 );

   for (pos = 0; pos < bufSize; pos++)
   {
      status = snprintf( (pPrintBuf + (pos * 3)),
                         4,
                         "%02X ",
                         *(u8 *)(pBuffer + pos) );
      if (status != 3)
      {
         DBG( "snprintf error %d\n", status );

         // Fix: free the print buffer on this early-exit path;
         // it was previously leaked
         kfree( pPrintBuf );
         return;
      }
   }

   DBG( " : %s\n", pPrintBuf );

   kfree( pPrintBuf );
   pPrintBuf = NULL;
   return;
}
/*===========================================================================
METHOD:
QSetDownReason (Public Method)
DESCRIPTION:
Sets mDownReason and turns carrier off
PARAMETERS
pDev [ I ] - Device specific memory
reason [ I ] - Reason device is down
RETURN VALUE:
None
===========================================================================*/
void QSetDownReason(
   sQCUSBNet * pDev,
   u8 reason )
{
/* Record the reason bit, then drop the carrier unconditionally */
set_bit( reason, &pDev->mDownReason );
netif_carrier_off( pDev->mpNetDev->net );
}
/*===========================================================================
METHOD:
QClearDownReason (Public Method)
DESCRIPTION:
Clear mDownReason and may turn carrier on
PARAMETERS
pDev [ I ] - Device specific memory
reason [ I ] - Reason device is no longer down
RETURN VALUE:
None
===========================================================================*/
void QClearDownReason(
   sQCUSBNet * pDev,
   u8 reason )
{
clear_bit( reason, &pDev->mDownReason );
/* Carrier comes back only once every down-reason bit is cleared */
if (pDev->mDownReason == 0)
{
netif_carrier_on( pDev->mpNetDev->net );
}
}
/*===========================================================================
METHOD:
QTestDownReason (Public Method)
DESCRIPTION:
Test mDownReason and returns whether reason is set
PARAMETERS
pDev [ I ] - Device specific memory
reason [ I ] - Reason device is down
RETURN VALUE:
bool
===========================================================================*/
bool QTestDownReason(
   sQCUSBNet * pDev,
   u8 reason )
{
/* Pure query: no bits are modified */
return test_bit( reason, &pDev->mDownReason );
}
/*=========================================================================*/
// Driver level asynchronous read functions
/*=========================================================================*/
/*===========================================================================
METHOD:
ReadCallback (Public Method)
DESCRIPTION:
Put the data in storage and notify anyone waiting for data
PARAMETERS
pReadURB [ I ] - URB this callback is run for
RETURN VALUE:
None
===========================================================================*/
void ReadCallback( struct urb * pReadURB )
{
   int result;
   u16 clientID;
   sClientMemList * pClientMem;
   void * pData;
   void * pDataCopy;
   u16 dataSize;
   sQCUSBNet * pDev;
   unsigned long flags;
   u16 transactionID;

   if (pReadURB == NULL)
   {
      DBG( "bad read URB\n" );
      return;
   }

   pDev = pReadURB->context;
   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device!\n" );
      return;
   }

   if (pReadURB->status != 0)
   {
      DBG( "Read status = %d\n", pReadURB->status );
      return;
   }
   DBG( "Read %d bytes\n", pReadURB->actual_length );

   pData = pReadURB->transfer_buffer;
   dataSize = pReadURB->actual_length;

   PrintHex( pData, dataSize );

   // Strip the QMUX header; on success result is the header length
   result = ParseQMUX( &clientID,
                       pData,
                       dataSize );
   if (result < 0)
   {
      DBG( "Read error parsing QMUX %d\n", result );
      return;
   }

   // Grab transaction ID

   // Data large enough?
   if (dataSize < result + 3)
   {
      DBG( "Data buffer too small to parse\n" );
      return;
   }

   // Transaction ID size is 1 for QMICTL, 2 for others
   if (clientID == QMICTL)
   {
      transactionID = *(u8*)(pData + result + 1);
   }
   else
   {
      transactionID = *(u16*)(pData + result + 1);
   }

   // Critical section
   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

   // Find memory storage for this service and Client ID
   // Not using FindClientMem because it can't handle broadcasts
   pClientMem = pDev->mQMIDev.mpClientMemList;

   while (pClientMem != NULL)
   {
      if (pClientMem->mClientID == clientID
      ||  (pClientMem->mClientID | 0xff00) == clientID)
      {
         // Make copy of pData
         pDataCopy = kmalloc( dataSize, GFP_ATOMIC );

         // Fix: GFP_ATOMIC allocation can fail; the result was
         // previously passed to memcpy() unchecked
         if (pDataCopy == NULL)
         {
            DBG( "Error allocating data copy, read will be discarded\n" );

            // End critical section
            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
            return;
         }
         memcpy( pDataCopy, pData, dataSize );

         if (AddToReadMemList( pDev,
                               pClientMem->mClientID,
                               transactionID,
                               pDataCopy,
                               dataSize ) == false)
         {
            DBG( "Error allocating pReadMemListEntry "
                 "read will be discarded\n" );
            kfree( pDataCopy );

            // End critical section
            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
            return;
         }

         // Success
         DBG( "Creating new readListEntry for client 0x%04X, TID %x\n",
              clientID,
              transactionID );

         // Notify this client data exists
         NotifyAndPopNotifyList( pDev,
                                 pClientMem->mClientID,
                                 transactionID );

         // Not a broadcast
         if (clientID >> 8 != 0xff)
         {
            break;
         }
      }

      // Next element
      pClientMem = pClientMem->mpNext;
   }

   // End critical section
   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
}
/*===========================================================================
METHOD:
IntCallback (Public Method)
DESCRIPTION:
Data is available, fire off a read URB
PARAMETERS
pIntURB [ I ] - URB this callback is run for
RETURN VALUE:
None
===========================================================================*/
void IntCallback( struct urb * pIntURB )
{
int status;
int interval;
sQCUSBNet * pDev = (sQCUSBNet *)pIntURB->context;
if (IsDeviceValid( pDev ) == false)
{
DBG( "Invalid device!\n" );
return;
}
// Verify this was a normal interrupt
if (pIntURB->status != 0)
{
DBG( "Int status = %d\n", pIntURB->status );
// Ignore EOVERFLOW errors
if (pIntURB->status != -EOVERFLOW)
{
// Read 'thread' dies here
return;
}
}
else
{
// CDC GET_ENCAPSULATED_RESPONSE
// (first 8 bytes of the interrupt packet match the notification header)
if ((pIntURB->actual_length == 8)
&& (*(u64*)pIntURB->transfer_buffer == CDC_GET_ENCAPSULATED_RESPONSE))
{
// Time to read
usb_fill_control_urb( pDev->mQMIDev.mpReadURB,
pDev->mpNetDev->udev,
usb_rcvctrlpipe( pDev->mpNetDev->udev, 0 ),
(unsigned char *)pDev->mQMIDev.mpReadSetupPacket,
pDev->mQMIDev.mpReadBuffer,
DEFAULT_READ_URB_LENGTH,
ReadCallback,
pDev );
status = usb_submit_urb( pDev->mQMIDev.mpReadURB, GFP_ATOMIC );
if (status != 0)
{
DBG( "Error submitting Read URB %d\n", status );
return;
}
}
// CDC CONNECTION_SPEED_CHANGE
else if ((pIntURB->actual_length == 16)
&& (*(u64*)pIntURB->transfer_buffer == CDC_CONNECTION_SPEED_CHANGE))
{
// if upstream or downstream is 0, stop traffic. Otherwise resume it
if ((*(u32*)(pIntURB->transfer_buffer + 8) == 0)
|| (*(u32*)(pIntURB->transfer_buffer + 12) == 0))
{
QSetDownReason( pDev, CDC_CONNECTION_SPEED );
DBG( "traffic stopping due to CONNECTION_SPEED_CHANGE\n" );
}
else
{
QClearDownReason( pDev, CDC_CONNECTION_SPEED );
DBG( "resuming traffic due to CONNECTION_SPEED_CHANGE\n" );
}
}
else
{
DBG( "ignoring invalid interrupt in packet\n" );
PrintHex( pIntURB->transfer_buffer, pIntURB->actual_length );
}
}
// Poll interval: 7 on high-speed buses, 3 otherwise (same values as
// StartRead uses when first scheduling the interrupt URB)
interval = (pDev->mpNetDev->udev->speed == USB_SPEED_HIGH) ? 7 : 3;
// Reschedule interrupt URB
usb_fill_int_urb( pIntURB,
pIntURB->dev,
pIntURB->pipe,
pIntURB->transfer_buffer,
pIntURB->transfer_buffer_length,
pIntURB->complete,
pIntURB->context,
interval );
status = usb_submit_urb( pIntURB, GFP_ATOMIC );
if (status != 0)
{
DBG( "Error re-submitting Int URB %d\n", status );
}
return;
}
/*===========================================================================
METHOD:
StartRead (Public Method)
DESCRIPTION:
Start continuous read "thread" (callback driven)
PARAMETERS:
pDev [ I ] - Device specific memory
RETURN VALUE:
int - 0 for success
negative errno for failure
===========================================================================*/
int StartRead( sQCUSBNet * pDev )
{
int interval;
if (IsDeviceValid( pDev ) == false)
{
DBG( "Invalid device!\n" );
return -ENXIO;
}
// Allocate URB buffers
// NOTE(review): on the -ENOMEM paths below, earlier allocations are not
// freed here; presumably the caller invokes KillRead() on failure, which
// frees and NULLs all of these — confirm against the caller
pDev->mQMIDev.mpReadURB = usb_alloc_urb( 0, GFP_KERNEL );
if (pDev->mQMIDev.mpReadURB == NULL)
{
DBG( "Error allocating read urb\n" );
return -ENOMEM;
}
pDev->mQMIDev.mpIntURB = usb_alloc_urb( 0, GFP_KERNEL );
if (pDev->mQMIDev.mpIntURB == NULL)
{
DBG( "Error allocating int urb\n" );
return -ENOMEM;
}
// Create data buffers
pDev->mQMIDev.mpReadBuffer = kmalloc( DEFAULT_READ_URB_LENGTH, GFP_KERNEL );
if (pDev->mQMIDev.mpReadBuffer == NULL)
{
DBG( "Error allocating read buffer\n" );
return -ENOMEM;
}
pDev->mQMIDev.mpIntBuffer = kmalloc( DEFAULT_READ_URB_LENGTH, GFP_KERNEL );
if (pDev->mQMIDev.mpIntBuffer == NULL)
{
DBG( "Error allocating int buffer\n" );
return -ENOMEM;
}
pDev->mQMIDev.mpReadSetupPacket = kmalloc( sizeof( sURBSetupPacket ),
GFP_KERNEL );
if (pDev->mQMIDev.mpReadSetupPacket == NULL)
{
DBG( "Error allocating setup packet buffer\n" );
return -ENOMEM;
}
// CDC Get Encapsulated Response packet
pDev->mQMIDev.mpReadSetupPacket->mRequestType = 0xA1;
pDev->mQMIDev.mpReadSetupPacket->mRequestCode = 1;
pDev->mQMIDev.mpReadSetupPacket->mValue = 0;
pDev->mQMIDev.mpReadSetupPacket->mIndex = 0;
pDev->mQMIDev.mpReadSetupPacket->mLength = DEFAULT_READ_URB_LENGTH;
// Poll interval: 7 on high-speed buses, 3 otherwise
interval = (pDev->mpNetDev->udev->speed == USB_SPEED_HIGH) ? 7 : 3;
// Schedule interrupt URB
usb_fill_int_urb( pDev->mQMIDev.mpIntURB,
pDev->mpNetDev->udev,
usb_rcvintpipe( pDev->mpNetDev->udev, 0x81 ),
pDev->mQMIDev.mpIntBuffer,
DEFAULT_READ_URB_LENGTH,
IntCallback,
pDev,
interval );
return usb_submit_urb( pDev->mQMIDev.mpIntURB, GFP_KERNEL );
}
/*===========================================================================
METHOD:
KillRead (Public Method)
DESCRIPTION:
Kill continuous read "thread"
PARAMETERS:
pDev [ I ] - Device specific memory
RETURN VALUE:
None
===========================================================================*/
void KillRead( sQCUSBNet * pDev )
{
// Stop reading: URBs must be killed before their buffers are freed
if (pDev->mQMIDev.mpReadURB != NULL)
{
DBG( "Killng read URB\n" );
usb_kill_urb( pDev->mQMIDev.mpReadURB );
}
if (pDev->mQMIDev.mpIntURB != NULL)
{
DBG( "Killng int URB\n" );
usb_kill_urb( pDev->mQMIDev.mpIntURB );
}
// Release buffers (kfree(NULL) is a no-op, so no guards needed);
// pointers are NULLed so a second call is safe
kfree( pDev->mQMIDev.mpReadSetupPacket );
pDev->mQMIDev.mpReadSetupPacket = NULL;
kfree( pDev->mQMIDev.mpReadBuffer );
pDev->mQMIDev.mpReadBuffer = NULL;
kfree( pDev->mQMIDev.mpIntBuffer );
pDev->mQMIDev.mpIntBuffer = NULL;
// Release URB's
usb_free_urb( pDev->mQMIDev.mpReadURB );
pDev->mQMIDev.mpReadURB = NULL;
usb_free_urb( pDev->mQMIDev.mpIntURB );
pDev->mQMIDev.mpIntURB = NULL;
}
/*=========================================================================*/
// Internal read/write functions
/*=========================================================================*/
/*===========================================================================
METHOD:
ReadAsync (Public Method)
DESCRIPTION:
Start asynchronous read
NOTE: Reading client's data store, not device
PARAMETERS:
pDev [ I ] - Device specific memory
clientID [ I ] - Requester's client ID
transactionID [ I ] - Transaction ID or 0 for any
pCallback [ I ] - Callback to be executed when data is available
pData [ I ] - Data buffer that willl be passed (unmodified)
to callback
RETURN VALUE:
int - 0 for success
negative errno for failure
===========================================================================*/
int ReadAsync(
   sQCUSBNet * pDev,
   u16 clientID,
   u16 transactionID,
   void (*pCallback)(sQCUSBNet*, u16, void *),
   void * pData )
{
   sClientMemList * pClientMem;
   sReadMemList ** ppReadMemList;
   unsigned long flags;

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device!\n" );
      return -ENXIO;
   }

   // Critical section
   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

   // Find memory storage for this client ID
   pClientMem = FindClientMem( pDev, clientID );
   if (pClientMem == NULL)
   {
      DBG( "Could not find matching client ID 0x%04X\n",
           clientID );

      // End critical section
      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
      return -ENXIO;
   }

   ppReadMemList = &(pClientMem->mpList);

   // Does data already exist?
   while (*ppReadMemList != NULL)
   {
      // Is this element our data?
      if (transactionID == 0
      ||  transactionID == (*ppReadMemList)->mTransactionID)
      {
         // End critical section
         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

         // Run our own callback
         pCallback( pDev, clientID, pData );

         return 0;
      }

      // Next
      ppReadMemList = &(*ppReadMemList)->mpNext;
   }

   // Data not found, add ourself to list of waiters
   if (AddToNotifyList( pDev,
                        clientID,
                        transactionID,
                        pCallback,
                        pData ) == false)
   {
      DBG( "Unable to register for notification\n" );

      // End critical section
      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

      // Fix: report the failure to the caller (consistent with
      // ReadSync); previously this path fell through and returned
      // success even though the callback would never be invoked
      return -EFAULT;
   }

   // End critical section
   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

   // Success
   return 0;
}
/*===========================================================================
METHOD:
UpSem (Public Method)
DESCRIPTION:
Notification function for synchronous read
PARAMETERS:
pDev [ I ] - Device specific memory
clientID [ I ] - Requester's client ID
pData [ I ] - Buffer that holds semaphore to be up()-ed
RETURN VALUE:
None
===========================================================================*/
/*
 * UpSem - notification callback used by ReadSync(): wakes the waiter
 * blocked on the semaphore carried in pData.
 */
void UpSem(
   sQCUSBNet * pDev,
   u16 clientID,
   void * pData )
{
   struct semaphore * pReadSem = (struct semaphore *)pData;

   DBG( "0x%04X\n", clientID );

   // Release the thread blocked in ReadSync()
   up( pReadSem );
}
/*===========================================================================
METHOD:
ReadSync (Public Method)
DESCRIPTION:
Start synchronous read
NOTE: Reading client's data store, not device
PARAMETERS:
pDev [ I ] - Device specific memory
ppOutBuffer [I/O] - On success, will be filled with a
pointer to read buffer
clientID [ I ] - Requester's client ID
transactionID [ I ] - Transaction ID or 0 for any
RETURN VALUE:
int - size of data read for success
negative errno for failure
===========================================================================*/
int ReadSync(
   sQCUSBNet * pDev,
   void ** ppOutBuffer,
   u16 clientID,
   u16 transactionID )
{
int result;
sClientMemList * pClientMem;
sNotifyList ** ppNotifyList, * pDelNotifyListEntry;
struct semaphore readSem;
void * pData;
unsigned long flags;
u16 dataSize;
if (IsDeviceValid( pDev ) == false)
{
DBG( "Invalid device!\n" );
return -ENXIO;
}
// Critical section
spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
// Find memory storage for this Client ID
pClientMem = FindClientMem( pDev, clientID );
if (pClientMem == NULL)
{
DBG( "Could not find matching client ID 0x%04X\n",
clientID );
// End critical section
spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
return -ENXIO;
}
// Note: in cases where read is interrupted,
// this will verify client is still valid
while (PopFromReadMemList( pDev,
clientID,
transactionID,
&pData,
&dataSize ) == false)
{
// Data does not yet exist, wait
sema_init( &readSem, 0 );
// Add ourself to list of waiters; UpSem will up() readSem when
// matching data arrives (see ReadCallback)
if (AddToNotifyList( pDev,
clientID,
transactionID,
UpSem,
&readSem ) == false)
{
DBG( "unable to register for notification\n" );
spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
return -EFAULT;
}
// End critical section while we block
spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
// Wait for notification
result = down_interruptible( &readSem );
if (result != 0)
{
DBG( "Interrupted %d\n", result );
// readSem will fall out of scope,
// remove from notify list so it's not referenced
spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
ppNotifyList = &(pClientMem->mpReadNotifyList);
pDelNotifyListEntry = NULL;
// Find and delete matching entry
while (*ppNotifyList != NULL)
{
if ((*ppNotifyList)->mpData == &readSem)
{
pDelNotifyListEntry = *ppNotifyList;
*ppNotifyList = (*ppNotifyList)->mpNext;
kfree( pDelNotifyListEntry );
break;
}
// Next
ppNotifyList = &(*ppNotifyList)->mpNext;
}
spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
return -EINTR;
}
// Verify device is still valid
if (IsDeviceValid( pDev ) == false)
{
DBG( "Invalid device!\n" );
return -ENXIO;
}
// Restart critical section and continue loop
spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
}
// End Critical section
spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
// Success: caller owns pData and is responsible for freeing it
*ppOutBuffer = pData;
return dataSize;
}
/*===========================================================================
METHOD:
WriteSyncCallback (Public Method)
DESCRIPTION:
Write callback
PARAMETERS
pWriteURB [ I ] - URB this callback is run for
RETURN VALUE:
None
===========================================================================*/
/*
 * WriteSyncCallback - URB completion handler for synchronous writes.
 * Logs the outcome and wakes the writer blocked in WriteSync().
 */
void WriteSyncCallback( struct urb * pWriteURB )
{
   struct semaphore * pWriteSem;

   // Nothing to do without a URB
   if (pWriteURB == NULL)
   {
      DBG( "null urb\n" );
      return;
   }

   DBG( "Write status/size %d/%d\n",
        pWriteURB->status,
        pWriteURB->actual_length );

   // Notify that write has completed by up()-ing semeaphore
   pWriteSem = (struct semaphore *)pWriteURB->context;
   up( pWriteSem );
}
/*===========================================================================
METHOD:
   WriteSync (Public Method)

DESCRIPTION:
   Start synchronous write.  Fills in the QMUX header, wakes the device
   via autopm, submits a control URB, and blocks on a semaphore until
   WriteSyncCallback signals completion.

PARAMETERS:
   pDev                 [ I ] - Device specific memory
   pWriteBuffer         [ I ] - Data to be written
   writeBufferSize      [ I ] - Size of data to be written
   clientID             [ I ] - Client ID of requester

RETURN VALUE:
   int - write size (includes QMUX)
         negative errno for failure
===========================================================================*/
int WriteSync(
   sQCUSBNet *        pDev,
   char *             pWriteBuffer,
   int                writeBufferSize,
   u16                clientID )
{
   int result;
   struct semaphore writeSem;
   struct urb * pWriteURB;
   sURBSetupPacket writeSetup;
   unsigned long flags;

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device!\n" );
      return -ENXIO;
   }

   pWriteURB = usb_alloc_urb( 0, GFP_KERNEL );
   if (pWriteURB == NULL)
   {
      DBG( "URB mem error\n" );
      return -ENOMEM;
   }

   // Fill writeBuffer with QMUX
   result = FillQMUX( clientID, pWriteBuffer, writeBufferSize );
   if (result < 0)
   {
      usb_free_urb( pWriteURB );
      return result;
   }

   // CDC Send Encapsulated Request packet
   writeSetup.mRequestType = 0x21;
   writeSetup.mRequestCode = 0;
   writeSetup.mValue = 0;
   writeSetup.mIndex = 0;
   // BUGFIX: was assigned 0 and then immediately overwritten; single store now
   writeSetup.mLength = writeBufferSize;

   // Create URB
   usb_fill_control_urb( pWriteURB,
                         pDev->mpNetDev->udev,
                         usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ),
                         (unsigned char *)&writeSetup,
                         (void*)pWriteBuffer,
                         writeBufferSize,
                         NULL,
                         pDev );

   DBG( "Actual Write:\n" );
   PrintHex( pWriteBuffer, writeBufferSize );

   sema_init( &writeSem, 0 );

   pWriteURB->complete = WriteSyncCallback;
   pWriteURB->context = &writeSem;

   // Wake device
   result = usb_autopm_get_interface( pDev->mpIntf );
   if (result < 0)
   {
      DBG( "unable to resume interface: %d\n", result );

      // Likely caused by device going from autosuspend -> full suspend
      if (result == -EPERM)
      {
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
         pDev->mpNetDev->udev->auto_pm = 0;
#endif
         QCSuspend( pDev->mpIntf, PMSG_SUSPEND );
      }

      // BUGFIX: this path previously leaked the URB
      usb_free_urb( pWriteURB );
      return result;
   }

   // Critical section
   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

   if (AddToURBList( pDev, clientID, pWriteURB ) == false)
   {
      usb_free_urb( pWriteURB );

      // End critical section
      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
      usb_autopm_put_interface( pDev->mpIntf );
      return -EINVAL;
   }

   result = usb_submit_urb( pWriteURB, GFP_KERNEL );
   if (result < 0)
   {
      DBG( "submit URB error %d\n", result );

      // Get URB back so we can destroy it
      if (PopFromURBList( pDev, clientID ) != pWriteURB)
      {
         // This shouldn't happen
         DBG( "Didn't get write URB back\n" );
      }

      usb_free_urb( pWriteURB );

      // End critical section
      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
      usb_autopm_put_interface( pDev->mpIntf );
      return result;
   }

   // End critical section while we block
   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

   // Wait for write to finish
   result = down_interruptible( &writeSem );

   // Verify device is still valid
   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device!\n" );
      // NOTE(review): the URB is intentionally NOT freed here; on device
      // teardown ReleaseClientID() pops and frees all client URBs, so
      // freeing it here would risk a double free — confirm against teardown
      return -ENXIO;
   }

   // Write is done, release device
   usb_autopm_put_interface( pDev->mpIntf );

   // Restart critical section
   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

   // Get URB back so we can destroy it
   if (PopFromURBList( pDev, clientID ) != pWriteURB)
   {
      // This shouldn't happen
      DBG( "Didn't get write URB back\n" );

      // End critical section
      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
      return -EINVAL;
   }

   // End critical section
   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

   if (result == 0)
   {
      // Write is finished
      if (pWriteURB->status == 0)
      {
         // Return number of bytes that were supposed to have been written,
         //   not size of QMI request
         result = writeBufferSize;
      }
      else
      {
         DBG( "bad status = %d\n", pWriteURB->status );

         // Return error value
         result = pWriteURB->status;
      }
   }
   else
   {
      // We have been forcibly interrupted
      DBG( "Interrupted %d !!!\n", result );
      DBG( "Device may be in bad state and need reset !!!\n" );

      // URB has not finished
      usb_kill_urb( pWriteURB );
   }

   usb_free_urb( pWriteURB );

   return result;
}
/*=========================================================================*/
// Internal memory management functions
/*=========================================================================*/

/*===========================================================================
METHOD:
   GetClientID (Public Method)

DESCRIPTION:
   Request a client ID from the device (QMI CTL GET CLIENT ID) for the
   given service type and allocate the client's bookkeeping memory.
   serviceType 0 (QMICTL) skips the request — QMI CTL always has ID 0.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   serviceType    [ I ] - Desired QMI service type

RETURN VALUE:
   int - Client ID for success (positive)
         Negative errno for error
===========================================================================*/
int GetClientID(
   sQCUSBNet *    pDev,
   u8             serviceType )
{
   u16 clientID;
   sClientMemList ** ppClientMem;
   int result;
   void * pWriteBuffer;
   u16 writeBufferSize;
   void * pReadBuffer;
   u16 readBufferSize;
   unsigned long flags;
   u8 transactionID;

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device!\n" );
      return -ENXIO;
   }

   // Run QMI request to be asigned a Client ID
   if (serviceType != 0)
   {
      writeBufferSize = QMICTLGetClientIDReqSize();
      pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
      if (pWriteBuffer == NULL)
      {
         return -ENOMEM;
      }

      // Transaction ID 0 is reserved; skip it on wraparound.
      // BUGFIX: the retry previously discarded the atomic_add_return result,
      // leaving transactionID stuck at 0 (ReleaseClientID already did this
      // correctly).
      transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
      if (transactionID == 0)
      {
         transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
      }

      result = QMICTLGetClientIDReq( pWriteBuffer,
                                     writeBufferSize,
                                     transactionID,
                                     serviceType );
      if (result < 0)
      {
         kfree( pWriteBuffer );
         return result;
      }

      result = WriteSync( pDev,
                          pWriteBuffer,
                          writeBufferSize,
                          QMICTL );
      kfree( pWriteBuffer );
      if (result < 0)
      {
         return result;
      }

      result = ReadSync( pDev,
                         &pReadBuffer,
                         QMICTL,
                         transactionID );
      if (result < 0)
      {
         DBG( "bad read data %d\n", result );
         return result;
      }
      readBufferSize = result;

      result = QMICTLGetClientIDResp( pReadBuffer,
                                      readBufferSize,
                                      &clientID );
      kfree( pReadBuffer );
      if (result < 0)
      {
         return result;
      }
   }
   else
   {
      // QMI CTL will always have client ID 0
      clientID = 0;
   }

   // Critical section
   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

   // Verify client is not already allocated
   if (FindClientMem( pDev, clientID ) != NULL)
   {
      DBG( "Client memory already exists\n" );

      // End Critical section
      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
      return -ETOOMANYREFS;
   }

   // Go to last entry in client mem list
   ppClientMem = &pDev->mQMIDev.mpClientMemList;
   while (*ppClientMem != NULL)
   {
      ppClientMem = &(*ppClientMem)->mpNext;
   }

   // Create locations for read to place data into
   // (GFP_ATOMIC: we hold a spinlock with IRQs off)
   *ppClientMem = kmalloc( sizeof( sClientMemList ), GFP_ATOMIC );
   if (*ppClientMem == NULL)
   {
      DBG( "Error allocating read list\n" );

      // End critical section
      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
      return -ENOMEM;
   }

   (*ppClientMem)->mClientID = clientID;
   (*ppClientMem)->mpList = NULL;
   (*ppClientMem)->mpReadNotifyList = NULL;
   (*ppClientMem)->mpURBList = NULL;
   (*ppClientMem)->mpNext = NULL;

   // End Critical section
   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

   return clientID;
}
/*===========================================================================
METHOD:
   ReleaseClientID (Public Method)

DESCRIPTION:
   Release QMI client and free memory.  For non-QMICTL clients a QMI CTL
   RELEASE CLIENT ID request is sent first; any failure there is non-fatal
   because the local bookkeeping must be torn down regardless.  Finally the
   client's entry is unlinked from the list and its pending notifications,
   URBs, and unread data are freed under mClientMemLock.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   clientID       [ I ] - Requester's client ID

RETURN VALUE:
   None
===========================================================================*/
void ReleaseClientID(
   sQCUSBNet *    pDev,
   u16            clientID )
{
   int result;
   sClientMemList ** ppDelClientMem;
   sClientMemList * pNextClientMem;
   struct urb * pDelURB;
   void * pDelData;
   u16 dataSize;
   void * pWriteBuffer;
   u16 writeBufferSize;
   void * pReadBuffer;
   u16 readBufferSize;
   unsigned long flags;
   u8 transactionID;

   // Is device is still valid?
   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "invalid device\n" );
      return;
   }

   DBG( "releasing 0x%04X\n", clientID );

   // Run QMI ReleaseClientID if this isn't QMICTL
   if (clientID != QMICTL)
   {
      // Note: all errors are non fatal, as we always want to delete
      //    client memory in latter part of function
      writeBufferSize = QMICTLReleaseClientIDReqSize();
      pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
      if (pWriteBuffer == NULL)
      {
         DBG( "memory error\n" );
      }
      else
      {
         // Transaction ID 0 is reserved; bump again if the counter wrapped
         transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
         if (transactionID == 0)
         {
            transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
         }
         result = QMICTLReleaseClientIDReq( pWriteBuffer,
                                            writeBufferSize,
                                            transactionID,
                                            clientID );
         if (result < 0)
         {
            kfree( pWriteBuffer );
            DBG( "error %d filling req buffer\n", result );
         }
         else
         {
            result = WriteSync( pDev,
                                pWriteBuffer,
                                writeBufferSize,
                                QMICTL );
            kfree( pWriteBuffer );
            if (result < 0)
            {
               DBG( "bad write status %d\n", result );
            }
            else
            {
               // Block until the device answers with our transaction ID
               result = ReadSync( pDev,
                                  &pReadBuffer,
                                  QMICTL,
                                  transactionID );
               if (result < 0)
               {
                  DBG( "bad read status %d\n", result );
               }
               else
               {
                  readBufferSize = result;
                  result = QMICTLReleaseClientIDResp( pReadBuffer,
                                                      readBufferSize );
                  kfree( pReadBuffer );
                  if (result < 0)
                  {
                     DBG( "error %d parsing response\n", result );
                  }
               }
            }
         }
      }
   }

   // Cleaning up client memory

   // Critical section
   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

   // Can't use FindClientMem, I need to keep pointer of previous
   ppDelClientMem = &pDev->mQMIDev.mpClientMemList;
   while (*ppDelClientMem != NULL)
   {
      if ((*ppDelClientMem)->mClientID == clientID)
      {
         pNextClientMem = (*ppDelClientMem)->mpNext;

         // Notify all clients (wakes anyone blocked on this client's data)
         while (NotifyAndPopNotifyList( pDev,
                                        clientID,
                                        0 ) == true );

         // Kill and free all URB's
         pDelURB = PopFromURBList( pDev, clientID );
         while (pDelURB != NULL)
         {
            usb_kill_urb( pDelURB );
            usb_free_urb( pDelURB );
            pDelURB = PopFromURBList( pDev, clientID );
         }

         // Free any unread data
         while (PopFromReadMemList( pDev,
                                    clientID,
                                    0,
                                    &pDelData,
                                    &dataSize ) == true )
         {
            kfree( pDelData );
         }

         // Delete client Mem
         kfree( *ppDelClientMem );

         // Overwrite the pointer that was to this client mem
         // (no advance needed: *ppDelClientMem now refers to the next node)
         *ppDelClientMem = pNextClientMem;
      }
      else
      {
         // I now point to ( a pointer of ((the node I was at)'s mpNext))
         ppDelClientMem = &(*ppDelClientMem)->mpNext;
      }
   }

   // End Critical section
   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

   return;
}
/*===========================================================================
METHOD:
   FindClientMem (Public Method)

DESCRIPTION:
   Walk the client memory list and return the entry whose ID matches
   clientID.  Caller MUST hold mClientMemLock.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   clientID       [ I ] - Requester's client ID

RETURN VALUE:
   sClientMemList - Pointer to requested sClientMemList for success
                    NULL for error
===========================================================================*/
sClientMemList * FindClientMem(
   sQCUSBNet *      pDev,
   u16              clientID )
{
   sClientMemList * pEntry;

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device\n" );
      return NULL;
   }

#ifdef CONFIG_SMP
   // Verify Lock
   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
   {
      DBG( "unlocked\n" );
      BUG();
   }
#endif

   for (pEntry = pDev->mQMIDev.mpClientMemList;
        pEntry != NULL;
        pEntry = pEntry->mpNext)
   {
      if (pEntry->mClientID == clientID)
      {
         // Success
         return pEntry;
      }
   }

   DBG( "Could not find client mem 0x%04X\n", clientID );
   return NULL;
}
/*===========================================================================
METHOD:
   AddToReadMemList (Public Method)

DESCRIPTION:
   Append a data buffer to the tail of this client's ReadMem list.
   Caller MUST hold mClientMemLock.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   clientID       [ I ] - Requester's client ID
   transactionID  [ I ] - Transaction ID or 0 for any
   pData          [ I ] - Data to add
   dataSize       [ I ] - Size of data to add

RETURN VALUE:
   bool
===========================================================================*/
bool AddToReadMemList(
   sQCUSBNet *      pDev,
   u16              clientID,
   u16              transactionID,
   void *           pData,
   u16              dataSize )
{
   sClientMemList * pClientMem;
   sReadMemList * pNewEntry;
   sReadMemList ** ppTail;

#ifdef CONFIG_SMP
   // Verify Lock
   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
   {
      DBG( "unlocked\n" );
      BUG();
   }
#endif

   // Get this client's memory location
   pClientMem = FindClientMem( pDev, clientID );
   if (pClientMem == NULL)
   {
      DBG( "Could not find this client's memory 0x%04X\n",
           clientID );
      return false;
   }

   // Build the new node first (GFP_ATOMIC: spinlock is held)
   pNewEntry = kmalloc( sizeof( sReadMemList ), GFP_ATOMIC );
   if (pNewEntry == NULL)
   {
      DBG( "Mem error\n" );
      return false;
   }
   pNewEntry->mpNext = NULL;
   pNewEntry->mpData = pData;
   pNewEntry->mDataSize = dataSize;
   pNewEntry->mTransactionID = transactionID;

   // Link it at the tail of the list
   for (ppTail = &pClientMem->mpList;
        *ppTail != NULL;
        ppTail = &(*ppTail)->mpNext)
   {
   }
   *ppTail = pNewEntry;

   return true;
}
/*===========================================================================
METHOD:
   PopFromReadMemList (Public Method)

DESCRIPTION:
   Remove the first entry from this client's ReadMem list that matches
   the specified transaction ID (0 matches anything) and hand its buffer
   to the caller, who takes ownership and must kfree() it.
   Caller MUST hold mClientMemLock.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   clientID       [ I ] - Requester's client ID
   transactionID  [ I ] - Transaction ID or 0 for any
   ppData         [I/O] - On success, will be filled with a
                          pointer to read buffer
   pDataSize      [I/O] - On succces, will be filled with the
                          read buffer's size

RETURN VALUE:
   bool
===========================================================================*/
bool PopFromReadMemList(
   sQCUSBNet *      pDev,
   u16              clientID,
   u16              transactionID,
   void **          ppData,
   u16 *            pDataSize )
{
   sClientMemList * pClientMem;
   sReadMemList ** ppCur;
   sReadMemList * pFound;

#ifdef CONFIG_SMP
   // Verify Lock
   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
   {
      DBG( "unlocked\n" );
      BUG();
   }
#endif

   // Get this client's memory location
   pClientMem = FindClientMem( pDev, clientID );
   if (pClientMem == NULL)
   {
      DBG( "Could not find this client's memory 0x%04X\n",
           clientID );
      return false;
   }

   // Scan for the first matching entry, tracking the link that points at it
   pFound = NULL;
   for (ppCur = &(pClientMem->mpList);
        *ppCur != NULL;
        ppCur = &(*ppCur)->mpNext)
   {
      if (transactionID == 0
      ||  transactionID == (*ppCur)->mTransactionID)
      {
         pFound = *ppCur;
         break;
      }

      DBG( "skipping 0x%04X data TID = %x\n", clientID, (*ppCur)->mTransactionID );
   }

   if (pFound == NULL)
   {
      DBG( "No read memory to pop, Client 0x%04X, TID = %x\n",
           clientID,
           transactionID );
      return false;
   }

   // Unlink the node, pass ownership of its buffer to the caller
   *ppCur = pFound->mpNext;
   *ppData = pFound->mpData;
   *pDataSize = pFound->mDataSize;

   kfree( pFound );
   return true;
}
/*===========================================================================
METHOD:
   AddToNotifyList (Public Method)

DESCRIPTION:
   Append a notification entry to the tail of this client's notify list.
   pNotifyFunct will be invoked when matching data arrives.
   Caller MUST hold mClientMemLock.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   clientID       [ I ] - Requester's client ID
   transactionID  [ I ] - Transaction ID or 0 for any
   pNotifyFunct   [ I ] - Callback function to be run when data is available
   pData          [ I ] - Data buffer that willl be passed (unmodified)
                          to callback

RETURN VALUE:
   bool
===========================================================================*/
bool AddToNotifyList(
   sQCUSBNet *      pDev,
   u16              clientID,
   u16              transactionID,
   void             (* pNotifyFunct)(sQCUSBNet *, u16, void *),
   void *           pData )
{
   sClientMemList * pClientMem;
   sNotifyList * pNewNotify;
   sNotifyList ** ppTail;

#ifdef CONFIG_SMP
   // Verify Lock
   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
   {
      DBG( "unlocked\n" );
      BUG();
   }
#endif

   // Get this client's memory location
   pClientMem = FindClientMem( pDev, clientID );
   if (pClientMem == NULL)
   {
      DBG( "Could not find this client's memory 0x%04X\n", clientID );
      return false;
   }

   // Build the new node first (GFP_ATOMIC: spinlock is held)
   pNewNotify = kmalloc( sizeof( sNotifyList ), GFP_ATOMIC );
   if (pNewNotify == NULL)
   {
      DBG( "Mem error\n" );
      return false;
   }
   pNewNotify->mpNext = NULL;
   pNewNotify->mpNotifyFunct = pNotifyFunct;
   pNewNotify->mpData = pData;
   pNewNotify->mTransactionID = transactionID;

   // Link it at the tail of the notify list
   for (ppTail = &pClientMem->mpReadNotifyList;
        *ppTail != NULL;
        ppTail = &(*ppTail)->mpNext)
   {
   }
   *ppTail = pNewNotify;

   return true;
}
/*===========================================================================
METHOD:
   NotifyAndPopNotifyList (Public Method)

DESCRIPTION:
   Remove the first matching Notify entry from this client's notify list
   and run its callback (with the lock temporarily dropped).
   Caller MUST have lock on mClientMemLock.

PARAMETERS:
   pDev              [ I ] - Device specific memory
   clientID          [ I ] - Requester's client ID
   transactionID     [ I ] - Transaction ID or 0 for any

RETURN VALUE:
   bool - true if an entry was found, notified, and freed
===========================================================================*/
bool NotifyAndPopNotifyList(
   sQCUSBNet *      pDev,
   u16              clientID,
   u16              transactionID )
{
   sClientMemList * pClientMem;
   sNotifyList * pDelNotifyList, ** ppNotifyList;

#ifdef CONFIG_SMP
   // Verify Lock
   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
   {
      DBG( "unlocked\n" );
      BUG();
   }
#endif

   // Get this client's memory location
   pClientMem = FindClientMem( pDev, clientID );
   if (pClientMem == NULL)
   {
      DBG( "Could not find this client's memory 0x%04X\n", clientID );
      return false;
   }

   ppNotifyList = &(pClientMem->mpReadNotifyList);
   pDelNotifyList = NULL;

   // Remove from list
   // A match is: caller wants any (0), entry accepts any (0), or exact match
   while (*ppNotifyList != NULL)
   {
      // Do we care about transaction ID?
      if (transactionID == 0
      ||  (*ppNotifyList)->mTransactionID == 0
      ||  transactionID == (*ppNotifyList)->mTransactionID)
      {
         pDelNotifyList = *ppNotifyList;
         break;
      }

      DBG( "skipping data TID = %x\n", (*ppNotifyList)->mTransactionID );

      // next
      ppNotifyList = &(*ppNotifyList)->mpNext;
   }

   if (pDelNotifyList != NULL)
   {
      // Remove element
      *ppNotifyList = (*ppNotifyList)->mpNext;

      // Run notification function
      if (pDelNotifyList->mpNotifyFunct != NULL)
      {
         // Unlock for callback
         // NOTE: plain spin_unlock/spin_lock (not irqrestore) — the caller's
         // irqsave state is preserved; the callback runs with the lock free
         spin_unlock( &pDev->mQMIDev.mClientMemLock );

         pDelNotifyList->mpNotifyFunct( pDev,
                                        clientID,
                                        pDelNotifyList->mpData );

         // Restore lock
         spin_lock( &pDev->mQMIDev.mClientMemLock );
      }

      // Delete memory
      kfree( pDelNotifyList );

      return true;
   }
   else
   {
      DBG( "no one to notify for TID %x\n", transactionID );

      return false;
   }
}
/*===========================================================================
METHOD:
   AddToURBList (Public Method)

DESCRIPTION:
   Append a URB to the tail of this client's URB list.
   Caller MUST hold mClientMemLock.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   clientID       [ I ] - Requester's client ID
   pURB           [ I ] - URB to be added

RETURN VALUE:
   bool
===========================================================================*/
bool AddToURBList(
   sQCUSBNet *      pDev,
   u16              clientID,
   struct urb *     pURB )
{
   sClientMemList * pClientMem;
   sURBList * pNewNode;
   sURBList ** ppTail;

#ifdef CONFIG_SMP
   // Verify Lock
   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
   {
      DBG( "unlocked\n" );
      BUG();
   }
#endif

   // Get this client's memory location
   pClientMem = FindClientMem( pDev, clientID );
   if (pClientMem == NULL)
   {
      DBG( "Could not find this client's memory 0x%04X\n", clientID );
      return false;
   }

   // Build the new node first (GFP_ATOMIC: spinlock is held)
   pNewNode = kmalloc( sizeof( sURBList ), GFP_ATOMIC );
   if (pNewNode == NULL)
   {
      DBG( "Mem error\n" );
      return false;
   }
   pNewNode->mpNext = NULL;
   pNewNode->mpURB = pURB;

   // Link it at the tail of the URB list
   for (ppTail = &pClientMem->mpURBList;
        *ppTail != NULL;
        ppTail = &(*ppTail)->mpNext)
   {
   }
   *ppTail = pNewNode;

   return true;
}
/*===========================================================================
METHOD:
   PopFromURBList (Public Method)

DESCRIPTION:
   Detach and return the URB at the head of this client's URB list.
   Caller MUST hold mClientMemLock.

PARAMETERS:
   pDev           [ I ] - Device specific memory
   clientID       [ I ] - Requester's client ID

RETURN VALUE:
   struct urb - Pointer to requested client's URB
                NULL for error
===========================================================================*/
struct urb * PopFromURBList(
   sQCUSBNet *      pDev,
   u16              clientID )
{
   sClientMemList * pClientMem;
   sURBList * pHead;
   struct urb * pURB;

#ifdef CONFIG_SMP
   // Verify Lock
   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
   {
      DBG( "unlocked\n" );
      BUG();
   }
#endif

   // Get this client's memory location
   pClientMem = FindClientMem( pDev, clientID );
   if (pClientMem == NULL)
   {
      DBG( "Could not find this client's memory 0x%04X\n", clientID );
      return NULL;
   }

   pHead = pClientMem->mpURBList;
   if (pHead == NULL)
   {
      DBG( "No URB's to pop\n" );
      return NULL;
   }

   // Unlink the head node, keep its URB, free the node
   pClientMem->mpURBList = pHead->mpNext;
   pURB = pHead->mpURB;
   kfree( pHead );

   return pURB;
}
/*=========================================================================*/
// Userspace wrappers
/*=========================================================================*/

/*===========================================================================
METHOD:
   UserspaceOpen (Public Method)

DESCRIPTION:
   Userspace open.  Allocates per-file-handle state; the QMI service must
   still be selected via IOCTL before reads or writes are allowed.

PARAMETERS
   pInode       [ I ] - kernel file descriptor
   pFilp        [ I ] - userspace file descriptor

RETURN VALUE:
   int - 0 for success
         Negative errno for failure
===========================================================================*/
int UserspaceOpen(
   struct inode *         pInode,
   struct file *          pFilp )
{
   sQMIFilpStorage * pFilpData;
   sQMIDev * pQMIDev;
   sQCUSBNet * pDev;

   // Recover the driver structures from the character device embedded
   // in the inode
   pQMIDev = container_of( pInode->i_cdev,
                           sQMIDev,
                           mCdev );
   pDev = container_of( pQMIDev,
                        sQCUSBNet,
                        mQMIDev );

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device\n" );
      return -ENXIO;
   }

   // Setup data in pFilp->private_data
   pFilpData = kmalloc( sizeof( sQMIFilpStorage ), GFP_KERNEL );
   if (pFilpData == NULL)
   {
      DBG( "Mem error\n" );
      return -ENOMEM;
   }

   // No client ID yet; set by IOCTL_QMI_GET_SERVICE_FILE
   pFilpData->mClientID = (u16)-1;
   pFilpData->mpDev = pDev;
   pFilp->private_data = pFilpData;

   return 0;
}
/*===========================================================================
METHOD:
UserspaceIOCTL (Public Method)
DESCRIPTION:
Userspace IOCTL functions
PARAMETERS
pUnusedInode [ I ] - (unused) kernel file descriptor
pFilp [ I ] - userspace file descriptor
cmd [ I ] - IOCTL command
arg [ I ] - IOCTL argument
RETURN VALUE:
int - 0 for success
Negative errno for failure
===========================================================================*/
int UserspaceIOCTL(
struct inode * pUnusedInode,
struct file * pFilp,
unsigned int cmd,
unsigned long arg )
{
int result;
u32 devVIDPID;
sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;
if (pFilpData == NULL)
{
DBG( "Bad file data\n" );
return -EBADF;
}
if (IsDeviceValid( pFilpData->mpDev ) == false)
{
DBG( "Invalid device! Updating f_ops\n" );
pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
return -ENXIO;
}
switch (cmd)
{
case IOCTL_QMI_GET_SERVICE_FILE:
DBG( "Setting up QMI for service %lu\n", arg );
if ((u8)arg == 0)
{
DBG( "Cannot use QMICTL from userspace\n" );
return -EINVAL;
}
// Connection is already setup
if (pFilpData->mClientID != (u16)-1)
{
DBG( "Close the current connection before opening a new one\n" );
return -EBADR;
}
result = GetClientID( pFilpData->mpDev, (u8)arg );
if (result < 0)
{
return result;
}
pFilpData->mClientID = result;
return 0;
break;
case IOCTL_QMI_GET_DEVICE_VIDPID:
if (arg == 0)
{
DBG( "Bad VIDPID buffer\n" );
return -EINVAL;
}
// Extra verification
if (pFilpData->mpDev->mpNetDev == 0)
{
DBG( "Bad mpNetDev\n" );
return -ENOMEM;
}
if (pFilpData->mpDev->mpNetDev->udev == 0)
{
DBG( "Bad udev\n" );
return -ENOMEM;
}
devVIDPID = ((le16_to_cpu( pFilpData->mpDev->mpNetDev->udev->descriptor.idVendor ) << 16)
+ le16_to_cpu( pFilpData->mpDev->mpNetDev->udev->descriptor.idProduct ) );
result = copy_to_user( (unsigned int *)arg, &devVIDPID, 4 );
if (result != 0)
{
DBG( "Copy to userspace failure\n" );
}
return result;
break;
case IOCTL_QMI_GET_DEVICE_MEID:
if (arg == 0)
{
DBG( "Bad MEID buffer\n" );
return -EINVAL;
}
result = copy_to_user( (unsigned int *)arg, &pFilpData->mpDev->mMEID[0], 14 );
if (result != 0)
{
DBG( "copy to userspace failure\n" );
}
return result;
break;
default:
return -EBADRQC;
}
}
/*===========================================================================
METHOD:
   UserspaceClose (Public Method)

DESCRIPTION:
   Userspace close.  If no other process still holds this struct file,
   release the QMI client ID and free the per-handle memory.

PARAMETERS
   pFilp           [ I ] - userspace file descriptor
   unusedFileTable [ I ] - (unused) file table

RETURN VALUE:
   int - 0 for success
         Negative errno for failure
===========================================================================*/
int UserspaceClose(
   struct file *       pFilp,
   fl_owner_t          unusedFileTable )
{
   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;
   struct list_head * pTasks;
   struct task_struct * pEachTask;
   struct fdtable * pFDT;
   int count = 0;
   int used = 0;
   unsigned long flags;

   if (pFilpData == NULL)
   {
      DBG( "bad file data\n" );
      return -EBADF;
   }

   // Fallthough.  If f_count == 1 no need to do more checks
   if (atomic_read( &pFilp->f_count ) != 1)
   {
      // f_count > 1: scan every task's fdtable to see whether another
      // process (e.g. after fork) still references this struct file
      // "group_leader" points to the main process' task, which resides in
      // the global "tasks" list.
      list_for_each( pTasks, &current->group_leader->tasks )
      {
         pEachTask = container_of( pTasks, struct task_struct, tasks );
         if (pEachTask == NULL || pEachTask->files == NULL)
         {
            // Some tasks may not have files (e.g. Xsession)
            continue;
         }
         spin_lock_irqsave( &pEachTask->files->file_lock, flags );
         pFDT = files_fdtable( pEachTask->files );
         for (count = 0; count < pFDT->max_fds; count++)
         {
            // Before this function was called, this file was removed
            // from our task's file table so if we find it in a file
            // table then it is being used by another task
            if (pFDT->fd[count] == pFilp)
            {
               used++;
               break;
            }
         }
         spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );
      }

      if (used > 0)
      {
         DBG( "not closing, as this FD is open by %d other process\n", used );
         return 0;
      }
   }

   if (IsDeviceValid( pFilpData->mpDev ) == false)
   {
      DBG( "Invalid device! Updating f_ops\n" );
      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
      return -ENXIO;
   }

   DBG( "0x%04X\n", pFilpData->mClientID );

   // Disable pFilpData so they can't keep sending read or write
   //    should this function hang
   // Note: memory pointer is still saved in pFilpData to be deleted later
   pFilp->private_data = NULL;

   if (pFilpData->mClientID != (u16)-1)
   {
      ReleaseClientID( pFilpData->mpDev,
                       pFilpData->mClientID );
   }

   kfree( pFilpData );
   return 0;
}
/*===========================================================================
METHOD:
   UserspaceRead (Public Method)

DESCRIPTION:
   Userspace read (synchronous).  Blocks in ReadSync for the next QMI
   message for this client, strips the QMUX header, and copies the
   payload to the user's buffer.

PARAMETERS
   pFilp           [ I ] - userspace file descriptor
   pBuf            [ I ] - read buffer
   size            [ I ] - size of read buffer
   pUnusedFpos     [ I ] - (unused) file position

RETURN VALUE:
   ssize_t - Number of bytes read for success
             Negative errno for failure
===========================================================================*/
ssize_t UserspaceRead(
   struct file *        pFilp,
   char __user *        pBuf,
   size_t               size,
   loff_t *             pUnusedFpos )
{
   int result;
   void * pReadData = NULL;
   void * pSmallReadData;
   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;

   if (pFilpData == NULL)
   {
      DBG( "Bad file data\n" );
      return -EBADF;
   }

   if (IsDeviceValid( pFilpData->mpDev ) == false)
   {
      DBG( "Invalid device! Updating f_ops\n" );
      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
      return -ENXIO;
   }

   if (pFilpData->mClientID == (u16)-1)
   {
      DBG( "Client ID must be set before reading 0x%04X\n",
           pFilpData->mClientID );
      return -EBADR;
   }

   // Perform synchronous read
   result = ReadSync( pFilpData->mpDev,
                      &pReadData,
                      pFilpData->mClientID,
                      0 );
   if (result <= 0)
   {
      return result;
   }

   // BUGFIX: guard against a message shorter than the QMUX header; the old
   // code only rejected it by accident of a signed/unsigned comparison below
   if (result < QMUXHeaderSize())
   {
      DBG( "Read data is too small to contain QMUX\n" );
      kfree( pReadData );
      return -EBADMSG;
   }

   // Discard QMUX header
   result -= QMUXHeaderSize();
   pSmallReadData = pReadData + QMUXHeaderSize();

   if (result > size)
   {
      DBG( "Read data is too large for amount user has requested\n" );
      kfree( pReadData );
      return -EOVERFLOW;
   }

   if (copy_to_user( pBuf, pSmallReadData, result ) != 0)
   {
      DBG( "Error copying read data to user\n" );
      result = -EFAULT;
   }

   // Reader is responsible for freeing read buffer
   kfree( pReadData );

   return result;
}
/*===========================================================================
METHOD:
   UserspaceWrite (Public Method)

DESCRIPTION:
   Userspace write (synchronous).  Copies the user payload into a kernel
   buffer sized with room for the QMUX header, then hands it to WriteSync.

PARAMETERS
   pFilp           [ I ] - userspace file descriptor
   pBuf            [ I ] - write buffer
   size            [ I ] - size of write buffer
   pUnusedFpos     [ I ] - (unused) file position

RETURN VALUE:
   ssize_t - Number of bytes read for success
             Negative errno for failure
===========================================================================*/
ssize_t UserspaceWrite (
   struct file *        pFilp,
   const char __user *  pBuf,
   size_t               size,
   loff_t *             pUnusedFpos )
{
   int status;
   void * pWriteBuffer;
   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;

   if (pFilpData == NULL)
   {
      DBG( "Bad file data\n" );
      return -EBADF;
   }

   if (IsDeviceValid( pFilpData->mpDev ) == false)
   {
      DBG( "Invalid device! Updating f_ops\n" );
      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
      return -ENXIO;
   }

   if (pFilpData->mClientID == (u16)-1)
   {
      DBG( "Client ID must be set before writing 0x%04X\n",
           pFilpData->mClientID );
      return -EBADR;
   }

   // Copy data from user to kernel space
   pWriteBuffer = kmalloc( size + QMUXHeaderSize(), GFP_KERNEL );
   if (pWriteBuffer == NULL)
   {
      return -ENOMEM;
   }
   status = copy_from_user( pWriteBuffer + QMUXHeaderSize(), pBuf, size );
   if (status != 0)
   {
      DBG( "Unable to copy data from userspace %d\n", status );
      kfree( pWriteBuffer );
      // BUGFIX: copy_from_user returns the number of bytes NOT copied;
      // the old code returned that positive count as if it were a result
      return -EFAULT;
   }

   status = WriteSync( pFilpData->mpDev,
                       pWriteBuffer,
                       size + QMUXHeaderSize(),
                       pFilpData->mClientID );

   kfree( pWriteBuffer );

   // On success, return requested size, not full QMI reqest size
   if (status == size + QMUXHeaderSize())
   {
      return size;
   }
   else
   {
      return status;
   }
}
/*=========================================================================*/
// Initializer and destructor
/*=========================================================================*/

/*===========================================================================
METHOD:
   RegisterQMIDevice (Public Method)

DESCRIPTION:
   QMI Device initialization function: set up the QMICTL client, start
   async reads, wait for the device to answer QMI, fetch the MEID, and
   create the qcqmi character device node.

PARAMETERS:
   pDev     [ I ] - Device specific memory

RETURN VALUE:
   int - 0 for success
         Negative errno for failure
===========================================================================*/
int RegisterQMIDevice( sQCUSBNet * pDev )
{
   int result;
   int QCQMIIndex = 0;
   dev_t devno;
   char * pDevName;

   pDev->mbQMIValid = true;

   // Set up for QMICTL
   //    (does not send QMI message, just sets up memory)
   result = GetClientID( pDev, QMICTL );
   if (result != 0)
   {
      pDev->mbQMIValid = false;
      return result;
   }
   atomic_set( &pDev->mQMIDev.mQMICTLTransactionID, 1 );

   // Start Async reading
   result = StartRead( pDev );
   if (result != 0)
   {
      pDev->mbQMIValid = false;
      return result;
   }

   // Device is not ready for QMI connections right away
   //   Wait up to 30 seconds before failing
   if (QMIReady( pDev, 30000 ) == false)
   {
      DBG( "Device unresponsive to QMI\n" );
      // NOTE(review): unlike the other failure paths, mbQMIValid is left
      // true here — presumably so DeregisterQMIDevice can still clean up;
      // confirm intent before changing
      return -ETIMEDOUT;
   }

   // Setup WDS callback
   result = SetupQMIWDSCallback( pDev );
   if (result != 0)
   {
      pDev->mbQMIValid = false;
      return result;
   }

   // Fill MEID for device
   result = QMIDMSGetMEID( pDev );
   if (result != 0)
   {
      pDev->mbQMIValid = false;
      return result;
   }

   // allocate and fill devno with numbers
   result = alloc_chrdev_region( &devno, 0, 1, "qcqmi" );
   if (result < 0)
   {
      return result;
   }

   // Create cdev
   cdev_init( &pDev->mQMIDev.mCdev, &UserspaceQMIFops );
   pDev->mQMIDev.mCdev.owner = THIS_MODULE;
   pDev->mQMIDev.mCdev.ops = &UserspaceQMIFops;

   result = cdev_add( &pDev->mQMIDev.mCdev, devno, 1 );
   if (result != 0)
   {
      DBG( "error adding cdev\n" );
      // BUGFIX: the chrdev region was previously leaked on this path
      unregister_chrdev_region( devno, 1 );
      return result;
   }

   // Match interface number (usb#)
   pDevName = strstr( pDev->mpNetDev->net->name, "usb" );
   if (pDevName == NULL)
   {
      DBG( "Bad net name: %s\n", pDev->mpNetDev->net->name );
      // BUGFIX: undo cdev_add/alloc_chrdev_region (previously leaked)
      cdev_del( &pDev->mQMIDev.mCdev );
      unregister_chrdev_region( devno, 1 );
      return -ENXIO;
   }
   pDevName += strlen( "usb" );
   QCQMIIndex = simple_strtoul( pDevName, NULL, 10 );
   if (QCQMIIndex < 0)
   {
      DBG( "Bad minor number\n" );
      // BUGFIX: undo cdev_add/alloc_chrdev_region (previously leaked)
      cdev_del( &pDev->mQMIDev.mCdev );
      unregister_chrdev_region( devno, 1 );
      return -ENXIO;
   }

   // Always print this output
   printk( KERN_INFO "creating qcqmi%d\n",
           QCQMIIndex );

#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,27 ))
   // kernel 2.6.27 added a new fourth parameter to device_create
   //    void * drvdata : the data to be added to the device for callbacks
   device_create( pDev->mQMIDev.mpDevClass,
                  NULL,
                  devno,
                  NULL,
                  "qcqmi%d",
                  QCQMIIndex );
#else
   device_create( pDev->mQMIDev.mpDevClass,
                  NULL,
                  devno,
                  "qcqmi%d",
                  QCQMIIndex );
#endif

   pDev->mQMIDev.mDevNum = devno;

   // Success
   return 0;
}
/*===========================================================================
METHOD:
   DeregisterQMIDevice (Public Method)

DESCRIPTION:
   QMI Device cleanup function: release every client, stop reads, force
   close any userspace file handles still referencing our inode, and tear
   down the character device.
   NOTE: When this function is run the device is no longer valid

PARAMETERS:
   pDev     [ I ] - Device specific memory

RETURN VALUE:
   None
===========================================================================*/
void DeregisterQMIDevice( sQCUSBNet * pDev )
{
   struct inode * pOpenInode;
   struct list_head * pInodeList;
   struct list_head * pTasks;
   struct task_struct * pEachTask;
   struct fdtable * pFDT;
   struct file * pFilp;
   unsigned long flags;
   int count = 0;

   // Should never happen, but check anyway
   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "wrong device\n" );
      return;
   }

   // Release all clients
   while (pDev->mQMIDev.mpClientMemList != NULL)
   {
      DBG( "release 0x%04X\n", pDev->mQMIDev.mpClientMemList->mClientID );

      ReleaseClientID( pDev,
                       pDev->mQMIDev.mpClientMemList->mClientID );
      // NOTE: pDev->mQMIDev.mpClientMemList will
      //       be updated in ReleaseClientID()
   }

   // Stop all reads
   KillRead( pDev );

   // From here on IsDeviceValid() is false for everyone else
   pDev->mbQMIValid = false;

   // Find each open file handle, and manually close it
   // Generally there will only be only one inode, but more are possible
   list_for_each( pInodeList, &pDev->mQMIDev.mCdev.list )
   {
      // Get the inode
      pOpenInode = container_of( pInodeList, struct inode, i_devices );
      if (pOpenInode != NULL && (IS_ERR( pOpenInode ) == false))
      {
         // Look for this inode in each task
         // "group_leader" points to the main process' task, which resides in
         // the global "tasks" list.
         list_for_each( pTasks, &current->group_leader->tasks )
         {
            pEachTask = container_of( pTasks, struct task_struct, tasks );
            if (pEachTask == NULL || pEachTask->files == NULL)
            {
               // Some tasks may not have files (e.g. Xsession)
               continue;
            }
            // For each file this task has open, check if it's referencing
            // our inode.
            spin_lock_irqsave( &pEachTask->files->file_lock, flags );
            pFDT = files_fdtable( pEachTask->files );
            for (count = 0; count < pFDT->max_fds; count++)
            {
               pFilp = pFDT->fd[count];
               if (pFilp != NULL && pFilp->f_dentry != NULL )
               {
                  if (pFilp->f_dentry->d_inode == pOpenInode)
                  {
                     // Close this file handle
                     rcu_assign_pointer( pFDT->fd[count], NULL );
                     // Drop the lock: filp_close may sleep
                     spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );

                     DBG( "forcing close of open file handle\n" );
                     filp_close( pFilp, pEachTask->files );

                     spin_lock_irqsave( &pEachTask->files->file_lock, flags );
                  }
               }
            }
            spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );
         }
      }
   }

   // Remove device (so no more calls can be made by users)
   if (IS_ERR(pDev->mQMIDev.mpDevClass) == false)
   {
      device_destroy( pDev->mQMIDev.mpDevClass,
                      pDev->mQMIDev.mDevNum );
   }
   cdev_del( &pDev->mQMIDev.mCdev );
   unregister_chrdev_region( pDev->mQMIDev.mDevNum, 1 );

   return;
}
/*=========================================================================*/
// Driver level client management
/*=========================================================================*/
/*===========================================================================
METHOD:
QMIReady (Public Method)
DESCRIPTION:
Send QMI CTL GET VERSION INFO REQ
Wait for response or timeout
PARAMETERS:
pDev [ I ] - Device specific memory
timeout [ I ] - Milliseconds to wait for response
RETURN VALUE:
bool
===========================================================================*/
bool QMIReady(
   sQCUSBNet *    pDev,
   u16            timeout )
{
   int result;
   void * pWriteBuffer;
   u16 writeBufferSize;
   void * pReadBuffer;
   u16 readBufferSize;
   struct semaphore readSem;
   u16 curTime;
   unsigned long flags;
   u8 transactionID;

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device\n" );
      // BUGFIX: was "return -EFAULT" -- in a bool function any nonzero
      // value reads as true, so failure looked like success to callers
      return false;
   }

   writeBufferSize = QMICTLReadyReqSize();
   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
   if (pWriteBuffer == NULL)
   {
      // BUGFIX: was "return -ENOMEM" (nonzero == true for a bool)
      return false;
   }

   // An implimentation of down_timeout has not been agreed on,
   // so it's been added and removed from the kernel several times.
   // We're just going to ignore it and poll the semaphore.

   // Send a write every 100 ms and see if we get a response
   for (curTime = 0; curTime < timeout; curTime += 100)
   {
      // Start read
      sema_init( &readSem, 0 );

      // Transaction ID wraps in u8 space; 0 is reserved, skip it
      transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
      if (transactionID == 0)
      {
         transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
      }

      result = ReadAsync( pDev, QMICTL, transactionID, UpSem, &readSem );
      if (result != 0)
      {
         // BUGFIX: pWriteBuffer used to leak on this path
         kfree( pWriteBuffer );
         return false;
      }

      // Fill buffer
      result = QMICTLReadyReq( pWriteBuffer,
                               writeBufferSize,
                               transactionID );
      if (result < 0)
      {
         kfree( pWriteBuffer );
         return false;
      }

      // Disregard status.  On errors, just try again
      WriteSync( pDev,
                 pWriteBuffer,
                 writeBufferSize,
                 QMICTL );

      msleep( 100 );
      if (down_trylock( &readSem ) == 0)
      {
         // Enter critical section
         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

         // Pop the read data
         if (PopFromReadMemList( pDev,
                                 QMICTL,
                                 transactionID,
                                 &pReadBuffer,
                                 &readBufferSize ) == true)
         {
            // Success

            // End critical section
            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );

            // We don't care about the result
            kfree( pReadBuffer );

            break;
         }
         else
         {
            // BUGFIX: the spinlock was never released on this path,
            // deadlocking the device on the next lock attempt
            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
         }
      }
      else
      {
         // Enter critical section
         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );

         // Timeout, remove the async read
         NotifyAndPopNotifyList( pDev, QMICTL, transactionID );

         // End critical section
         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
      }
   }

   kfree( pWriteBuffer );

   // Did we time out?
   if (curTime >= timeout)
   {
      return false;
   }

   DBG( "QMI Ready after %u milliseconds\n", curTime );

   // TODO: 3580 and newer firmware does not require this delay
   msleep( 5000 );

   // Success
   return true;
}
/*===========================================================================
METHOD:
QMIWDSCallback (Public Method)
DESCRIPTION:
QMI WDS callback function
Update net stats or link state
PARAMETERS:
pDev [ I ] - Device specific memory
clientID [ I ] - Client ID
pData [ I ] - Callback data (unused)
RETURN VALUE:
None
===========================================================================*/
void QMIWDSCallback(
sQCUSBNet * pDev,
u16 clientID,
void * pData )
{
bool bRet;
int result;
void * pReadBuffer;
u16 readBufferSize;
// Net stats moved inside the usbnet-owned net_device in 2.6.31
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 ))
struct net_device_stats * pStats = &(pDev->mpNetDev->stats);
#else
struct net_device_stats * pStats = &(pDev->mpNetDev->net->stats);
#endif
// All-ones sentinels mean "field not present in this WDS event";
// QMIWDSEventResp only overwrites the fields the packet carries
u32 TXOk = (u32)-1;
u32 RXOk = (u32)-1;
u32 TXErr = (u32)-1;
u32 RXErr = (u32)-1;
u32 TXOfl = (u32)-1;
u32 RXOfl = (u32)-1;
u64 TXBytesOk = (u64)-1;
u64 RXBytesOk = (u64)-1;
bool bLinkState;
bool bReconfigure;
unsigned long flags;
if (IsDeviceValid( pDev ) == false)
{
DBG( "Invalid device\n" );
return;
}
// Critical section: pop the event payload queued by the read URB
spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
bRet = PopFromReadMemList( pDev,
clientID,
0,
&pReadBuffer,
&readBufferSize );
// End critical section
spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
if (bRet == false)
{
DBG( "WDS callback failed to get data\n" );
return;
}
// Default values: keep current link state unless the event says otherwise
bLinkState = ! QTestDownReason( pDev, NO_NDIS_CONNECTION );
bReconfigure = false;
result = QMIWDSEventResp( pReadBuffer,
readBufferSize,
&TXOk,
&RXOk,
&TXErr,
&RXErr,
&TXOfl,
&RXOfl,
&TXBytesOk,
&RXBytesOk,
&bLinkState,
&bReconfigure );
if (result < 0)
{
DBG( "bad WDS packet\n" );
}
else
{
// Fill in new values, ignore max values (i.e. untouched sentinels)
if (TXOfl != (u32)-1)
{
pStats->tx_fifo_errors = TXOfl;
}
if (RXOfl != (u32)-1)
{
pStats->rx_fifo_errors = RXOfl;
}
if (TXErr != (u32)-1)
{
pStats->tx_errors = TXErr;
}
if (RXErr != (u32)-1)
{
pStats->rx_errors = RXErr;
}
// Device reports successful packets only; totals include errors
if (TXOk != (u32)-1)
{
pStats->tx_packets = TXOk + pStats->tx_errors;
}
if (RXOk != (u32)-1)
{
pStats->rx_packets = RXOk + pStats->rx_errors;
}
if (TXBytesOk != (u64)-1)
{
pStats->tx_bytes = TXBytesOk;
}
if (RXBytesOk != (u64)-1)
{
pStats->rx_bytes = RXBytesOk;
}
if (bReconfigure == true)
{
// Bounce the link: set-then-clear forces a carrier reset
DBG( "Net device link reset\n" );
QSetDownReason( pDev, NO_NDIS_CONNECTION );
QClearDownReason( pDev, NO_NDIS_CONNECTION );
}
else
{
if (bLinkState == true)
{
DBG( "Net device link is connected\n" );
QClearDownReason( pDev, NO_NDIS_CONNECTION );
}
else
{
DBG( "Net device link is disconnected\n" );
QSetDownReason( pDev, NO_NDIS_CONNECTION );
}
}
}
kfree( pReadBuffer );
// Setup next read: re-arm ourselves as the callback for the next event
result = ReadAsync( pDev,
clientID,
0,
QMIWDSCallback,
pData );
if (result != 0)
{
DBG( "unable to setup next async read\n" );
}
return;
}
/*===========================================================================
METHOD:
SetupQMIWDSCallback (Public Method)
DESCRIPTION:
Request client and fire off reqests and start async read for
QMI WDS callback
PARAMETERS:
pDev [ I ] - Device specific memory
RETURN VALUE:
int - 0 for success
Negative errno for failure
===========================================================================*/
int SetupQMIWDSCallback( sQCUSBNet * pDev )
{
   int result;
   void * pWriteBuffer;
   u16 writeBufferSize;
   u16 WDSClientID;

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device\n" );
      return -EFAULT;
   }

   result = GetClientID( pDev, QMIWDS );
   if (result < 0)
   {
      return result;
   }
   WDSClientID = result;

   // QMI WDS Set Event Report
   writeBufferSize = QMIWDSSetEventReportReqSize();
   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
   if (pWriteBuffer == NULL)
   {
      result = -ENOMEM;
      goto fail;
   }

   result = QMIWDSSetEventReportReq( pWriteBuffer,
                                     writeBufferSize,
                                     1 );
   if (result < 0)
   {
      kfree( pWriteBuffer );
      goto fail;
   }

   result = WriteSync( pDev,
                       pWriteBuffer,
                       writeBufferSize,
                       WDSClientID );
   kfree( pWriteBuffer );
   if (result < 0)
   {
      goto fail;
   }

   // QMI WDS Get PKG SRVC Status
   writeBufferSize = QMIWDSGetPKGSRVCStatusReqSize();
   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
   if (pWriteBuffer == NULL)
   {
      result = -ENOMEM;
      goto fail;
   }

   result = QMIWDSGetPKGSRVCStatusReq( pWriteBuffer,
                                       writeBufferSize,
                                       2 );
   if (result < 0)
   {
      kfree( pWriteBuffer );
      goto fail;
   }

   result = WriteSync( pDev,
                       pWriteBuffer,
                       writeBufferSize,
                       WDSClientID );
   kfree( pWriteBuffer );
   if (result < 0)
   {
      goto fail;
   }

   // Setup asnyc read callback
   result = ReadAsync( pDev,
                       WDSClientID,
                       0,
                       QMIWDSCallback,
                       NULL );
   if (result != 0)
   {
      DBG( "unable to setup async read\n" );
      goto fail;
   }

   // Send SetControlLineState request (USB_CDC)
   // Required for Autoconnect
   result = usb_control_msg( pDev->mpNetDev->udev,
                             usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ),
                             0x22,
                             0x21,
                             1, // DTR present
                             0,
                             NULL,
                             0,
                             100 );
   if (result < 0)
   {
      DBG( "Bad SetControlLineState status %d\n", result );
      goto fail;
   }

   // Success: the WDS client stays allocated for the lifetime of the
   // QMIWDSCallback chain
   return 0;

fail:
   // BUGFIX: the WDS client ID used to leak on every error path.
   // ReleaseClientID() also cancels any read registered for this client.
   ReleaseClientID( pDev, WDSClientID );
   return result;
}
/*===========================================================================
METHOD:
QMIDMSGetMEID (Public Method)
DESCRIPTION:
Register DMS client
send MEID req and parse response
Release DMS client
PARAMETERS:
pDev [ I ] - Device specific memory
RETURN VALUE:
None
===========================================================================*/
int QMIDMSGetMEID( sQCUSBNet * pDev )
{
   int result;
   void * pWriteBuffer;
   u16 writeBufferSize;
   void * pReadBuffer;
   u16 readBufferSize;
   u16 DMSClientID;

   if (IsDeviceValid( pDev ) == false)
   {
      DBG( "Invalid device\n" );
      return -EFAULT;
   }

   result = GetClientID( pDev, QMIDMS );
   if (result < 0)
   {
      return result;
   }
   DMSClientID = result;

   // QMI DMS Get Serial numbers Req
   writeBufferSize = QMIDMSGetMEIDReqSize();
   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
   if (pWriteBuffer == NULL)
   {
      // BUGFIX: the DMS client ID used to leak on this and the
      // following error paths; all exits now release it
      result = -ENOMEM;
      goto cleanup;
   }

   result = QMIDMSGetMEIDReq( pWriteBuffer,
                              writeBufferSize,
                              1 );
   if (result < 0)
   {
      kfree( pWriteBuffer );
      goto cleanup;
   }

   result = WriteSync( pDev,
                       pWriteBuffer,
                       writeBufferSize,
                       DMSClientID );
   kfree( pWriteBuffer );
   if (result < 0)
   {
      goto cleanup;
   }

   // QMI DMS Get Serial numbers Resp
   result = ReadSync( pDev,
                      &pReadBuffer,
                      DMSClientID,
                      1 );
   if (result < 0)
   {
      goto cleanup;
   }
   readBufferSize = result;

   result = QMIDMSGetMEIDResp( pReadBuffer,
                               readBufferSize,
                               &pDev->mMEID[0],
                               14 );
   kfree( pReadBuffer );

   if (result < 0)
   {
      DBG( "bad get MEID resp\n" );

      // Non fatal error, device did not return any MEID
      // Fill with 0's
      memset( &pDev->mMEID[0], '0', 14 );
   }
   result = 0;

cleanup:
   ReleaseClientID( pDev, DMSClientID );
   return result;
}
| gpl-2.0 |
honor6-dev/android_kernel_huawei_h60 | net/tipc/netlink.c | 1394 | 3499 | /*
* net/tipc/netlink.c: TIPC configuration handling
*
* Copyright (c) 2005-2006, Ericsson AB
* Copyright (c) 2005-2007, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "config.h"
#include <net/genetlink.h>
/*
 * handle_cmd - service one TIPC configuration request received via
 * generic netlink, optionally unicasting a reply back to the sender.
 * Always returns 0; errors are reported inside the reply payload.
 */
static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
{
	struct nlmsghdr *req_nlh = info->nlhdr;
	struct tipc_genlmsghdr *req_userhdr = info->userhdr;
	int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
	struct sk_buff *reply;
	struct nlmsghdr *reply_nlh;
	u16 cmd;

	/* Commands with the two top bits set require CAP_NET_ADMIN */
	cmd = req_userhdr->cmd;
	if ((cmd & 0xC000) && !netlink_capable(skb, CAP_NET_ADMIN))
		cmd = TIPC_CMD_NOT_NET_ADMIN;

	reply = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
				nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
				nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
				hdr_space);
	if (!reply)
		return 0;

	/* Copy the request's netlink/genl/TIPC headers onto the reply,
	 * then patch the length to match the reply buffer */
	skb_push(reply, hdr_space);
	reply_nlh = nlmsg_hdr(reply);
	memcpy(reply_nlh, req_nlh, hdr_space);
	reply_nlh->nlmsg_len = reply->len;
	genlmsg_unicast(&init_net, reply, NETLINK_CB(skb).portid);

	return 0;
}
/* Generic netlink family descriptor; GENL_ID_GENERATE lets the kernel
 * assign the numeric family id at registration time */
static struct genl_family tipc_genl_family = {
.id = GENL_ID_GENERATE,
.name = TIPC_GENL_NAME,
.version = TIPC_GENL_VERSION,
.hdrsize = TIPC_GENL_HDRLEN,
.maxattr = 0,
};
/* Single operation: every TIPC config command is funneled through
 * handle_cmd() */
static struct genl_ops tipc_genl_ops = {
.cmd = TIPC_GENL_CMD,
.doit = handle_cmd,
};
/* Nonzero once the family is registered; guards double-unregister */
static int tipc_genl_family_registered;
/*
 * tipc_netlink_start - register TIPC's generic netlink family and its
 * single configuration operation.
 *
 * Returns 0 on success or the negative error from registration.
 */
int tipc_netlink_start(void)
{
	int err = genl_register_family_with_ops(&tipc_genl_family,
						&tipc_genl_ops, 1);
	if (err) {
		pr_err("Failed to register netlink interface\n");
		return err;
	}

	tipc_genl_family_registered = 1;
	return 0;
}
/*
 * tipc_netlink_stop - unregister the generic netlink family; a no-op if
 * tipc_netlink_start() never succeeded.
 */
void tipc_netlink_stop(void)
{
	if (tipc_genl_family_registered) {
		genl_unregister_family(&tipc_genl_family);
		tipc_genl_family_registered = 0;
	}
}
| gpl-2.0 |
bonezuk/linux | drivers/media/rc/keymaps/rc-asus-pc39.c | 1906 | 2443 | /* asus-pc39.h - Keytable for asus_pc39 Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Marc Fargas <telenieko@telenieko.com>
* this is the remote control that comes with the asus p7131
* which has a label saying is "Model PC-39"
*/
/* Scancode -> keycode table for the PC-39 remote.  All scancodes share
 * the 0x08 high byte (the remote's RC-5 address); the low byte is the
 * per-button command. */
static struct rc_map_table asus_pc39[] = {
/* Keys 0 to 9 */
{ 0x082a, KEY_0 },
{ 0x0816, KEY_1 },
{ 0x0812, KEY_2 },
{ 0x0814, KEY_3 },
{ 0x0836, KEY_4 },
{ 0x0832, KEY_5 },
{ 0x0834, KEY_6 },
{ 0x080e, KEY_7 },
{ 0x080a, KEY_8 },
{ 0x080c, KEY_9 },
{ 0x0801, KEY_RADIO }, /* radio */
{ 0x083c, KEY_MENU }, /* dvd/menu */
{ 0x0815, KEY_VOLUMEUP },
{ 0x0826, KEY_VOLUMEDOWN },
/* Navigation cluster */
{ 0x0808, KEY_UP },
{ 0x0804, KEY_DOWN },
{ 0x0818, KEY_LEFT },
{ 0x0810, KEY_RIGHT },
{ 0x081a, KEY_VIDEO }, /* video */
{ 0x0806, KEY_AUDIO }, /* music */
{ 0x081e, KEY_TV }, /* tv */
{ 0x0822, KEY_EXIT }, /* back */
{ 0x0835, KEY_CHANNELUP }, /* channel / program + */
{ 0x0824, KEY_CHANNELDOWN }, /* channel / program - */
{ 0x0825, KEY_ENTER }, /* enter */
/* Transport controls */
{ 0x0839, KEY_PAUSE }, /* play/pause */
{ 0x0821, KEY_PREVIOUS }, /* rew */
{ 0x0819, KEY_NEXT }, /* forward */
{ 0x0831, KEY_REWIND }, /* backward << */
{ 0x0805, KEY_FASTFORWARD }, /* forward >> */
{ 0x0809, KEY_STOP },
{ 0x0811, KEY_RECORD }, /* recording */
{ 0x0829, KEY_POWER }, /* the button that reads "close" */
{ 0x082e, KEY_ZOOM }, /* full screen */
{ 0x082c, KEY_MACRO }, /* recall */
{ 0x081c, KEY_HOME }, /* home */
{ 0x083a, KEY_PVR }, /* picture */
{ 0x0802, KEY_MUTE }, /* mute */
{ 0x083e, KEY_DVD }, /* dvd */
};
/* Wrapper registered with the RC core; decodes as RC-5 */
static struct rc_map_list asus_pc39_map = {
.map = {
.scan = asus_pc39,
.size = ARRAY_SIZE(asus_pc39),
.rc_type = RC_TYPE_RC5,
.name = RC_MAP_ASUS_PC39,
}
};
/* Module init: make the keymap available to RC drivers by name */
static int __init init_rc_map_asus_pc39(void)
{
return rc_map_register(&asus_pc39_map);
}
/* Module exit: withdraw the keymap from the RC core */
static void __exit exit_rc_map_asus_pc39(void)
{
rc_map_unregister(&asus_pc39_map);
}
module_init(init_rc_map_asus_pc39)
module_exit(exit_rc_map_asus_pc39)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| gpl-2.0 |
EPDCenter/android_kernel_archos_97b_Titan | drivers/acpi/processor_idle.c | 1906 | 29533 | /*
* processor_idle - idle state submodule to the ACPI processor driver
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
* Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* - Added processor hotplug support
* Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added support for C3 on SMP
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
* asm/acpi.h is not an option, as it would require more include magic. Also
* creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
*/
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif
#include <asm/io.h>
#include <asm/uaccess.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD 1 /* 1us */
#define C3_OVERHEAD 1 /* 1us */
#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
/*
 * disabled_by_idle_boot_param - nonzero when an "idle=" boot option
 * (poll, mwait, or halt) takes idle handling away from this driver.
 */
static int disabled_by_idle_boot_param(void)
{
	switch (boot_option_idle_override) {
	case IDLE_POLL:
	case IDLE_FORCE_MWAIT:
	case IDLE_HALT:
		return 1;
	default:
		return 0;
	}
}
/*
* IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
* For now disable this. Probably a bug somewhere else.
*
* To skip this limit, boot/load with a large max_cstate limit.
*/
/* DMI callback: clamp max_cstate for a known-broken machine.  The cap
 * is carried in id->driver_data; a user-supplied max_cstate above
 * ACPI_PROCESSOR_MAX_POWER means "no limit", so leave it alone. */
static int set_max_cstate(const struct dmi_system_id *id)
{
if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
return 0;
printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
" Override with \"processor.max_cstate=%d\"\n", id->ident,
(long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
max_cstate = (long)id->driver_data;
return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
callers to only run once -AK */
/* Machines whose BIOS misbehaves in deep C-states; the void* payload is
 * the maximum safe C-state for set_max_cstate() */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
{ set_max_cstate, "Clevo 5600D", {
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
{ set_max_cstate, "Pavilion zv5000", {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
(void *)1},
{ set_max_cstate, "Asus L8400B", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
{},
};
/*
* Callers should disable interrupts before the call and enable
* interrupts after return.
*/
/* Halt the CPU until the next interrupt.  Caller must have interrupts
 * disabled (per the comment above); we return with them disabled too. */
static void acpi_safe_halt(void)
{
current_thread_info()->status &= ~TS_POLLING;
/*
* TS_POLLING-cleared state must be visible before we
* test NEED_RESCHED:
*/
smp_mb();
if (!need_resched()) {
safe_halt();
local_irq_disable();
}
current_thread_info()->status |= TS_POLLING;
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3
/*
* Some BIOS implementations switch to C3 in the published C2 state.
* This seems to be a common problem on AMD boxen, but other vendors
* are affected too. We pick the most conservative approach: we assume
* that the local APIC stops in both C2 and C3.
*/
/* Record the shallowest C-state index at which the local APIC timer
 * stops, so the broadcast clockevent can take over from there on. */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
struct acpi_processor_power *pwr = &pr->power;
u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
/* ARAT ("always running APIC timer") CPUs never need broadcast */
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
/* AMD C1E effectively demotes even C1 to timer-stopping */
if (amd_e400_c1e_detected)
type = ACPI_STATE_C1;
/*
* Check, if one of the previous states already marked the lapic
* unstable
*/
if (pwr->timer_broadcast_on_state < state)
return;
if (cx->type >= type)
pr->power.timer_broadcast_on_state = state;
}
/* Runs on the target CPU: switch its clockevent broadcast on/off */
static void __lapic_timer_propagate_broadcast(void *arg)
{
struct acpi_processor *pr = (struct acpi_processor *) arg;
unsigned long reason;
reason = pr->power.timer_broadcast_on_state < INT_MAX ?
CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
clockevents_notify(reason, &pr->id);
}
/* Push the broadcast decision to the CPU that owns this processor */
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
(void *)pr, 1);
}
/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
/* Index of cx within the per-processor states array */
int state = cx - pr->power.states;
if (state >= pr->power.timer_broadcast_on_state) {
unsigned long reason;
reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
clockevents_notify(reason, &pr->id);
}
}
#else
/* No-op stubs when the arch's APIC timer keeps running in deep C */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
}
#endif
/*
* Suspend / resume control
*/
/* Nonzero while the system is suspending; gates idle entry */
static int acpi_idle_suspend;
/* BM_RLD bit captured at suspend so resume can restore it */
static u32 saved_bm_rld;
/* Snapshot the bus-master reload bit before suspend */
static void acpi_idle_bm_rld_save(void)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
}
/* Restore BM_RLD after resume, writing only if firmware changed it */
static void acpi_idle_bm_rld_restore(void)
{
u32 resumed_bm_rld;
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
if (resumed_bm_rld != saved_bm_rld)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
/*
 * acpi_processor_suspend - save BM_RLD and flag idle as suspended.
 * Idempotent: a repeated suspend call changes nothing.  Always 0.
 */
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	if (acpi_idle_suspend != 1) {
		acpi_idle_bm_rld_save();
		acpi_idle_suspend = 1;
	}
	return 0;
}
/*
 * acpi_processor_resume - restore BM_RLD and clear the suspend flag.
 * Idempotent: resuming without a prior suspend changes nothing.
 */
int acpi_processor_resume(struct acpi_device * device)
{
	if (acpi_idle_suspend != 0) {
		acpi_idle_bm_rld_restore();
		acpi_idle_suspend = 0;
	}
	return 0;
}
#if defined(CONFIG_X86)
/* Mark the TSC unstable if this C-state would halt it on this vendor;
 * CPUs advertising NONSTOP_TSC are exempt. */
static void tsc_check_state(int state)
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
*/
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
/*FALL THROUGH*/
default:
/* TSC could halt in idle, so notify users */
if (state > ACPI_STATE_C1)
mark_tsc_unstable("TSC halts in idle");
}
}
#else
/* Non-x86: TSC is an x86 concept, nothing to check */
static void tsc_check_state(int state) { return; }
#endif
/* Derive C2/C3 info from the FADT plus the processor's P_BLK I/O block
 * (legacy path, used when _CST is absent).  Returns 0 on success or a
 * negative errno; states that fail latency limits get address 0. */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
if (!pr)
return -EINVAL;
if (!pr->pblk)
return -ENODEV;
/* if info is obtained from pblk/fadt, type equals state */
pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
#ifndef CONFIG_HOTPLUG_CPU
/*
* Check for P_LVL2_UP flag before entering C2 and above on
* an SMP system.
*/
if ((num_online_cpus() > 1) &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
return -ENODEV;
#endif
/* determine C2 and C3 address from pblk */
pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
/* determine latencies from FADT */
pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
/*
* FADT specified C2 latency must be less than or equal to
* 100 microseconds.
*/
if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
/* invalidate C2 */
pr->power.states[ACPI_STATE_C2].address = 0;
}
/*
* FADT supplied C3 latency must be less than or equal to
* 1000 microseconds.
*/
if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
/* invalidate C3 */
pr->power.states[ACPI_STATE_C3].address = 0;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
pr->power.states[ACPI_STATE_C2].address,
pr->power.states[ACPI_STATE_C3].address));
return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
if (!pr->power.states[ACPI_STATE_C1].valid) {
/* set the first C-State to C1 */
/* all processors need to support C1 */
pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
pr->power.states[ACPI_STATE_C1].valid = 1;
pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
}
/* the C0 state only exists as a filler in our array */
pr->power.states[ACPI_STATE_C0].valid = 1;
return 0;
}
/* Parse the ACPI _CST package into pr->power.states[1..].  Malformed
 * package elements are skipped rather than treated as fatal; requires
 * at least two usable states overall.  Returns 0 or a negative errno. */
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
acpi_status status = 0;
u64 count;
int current_count;
int i;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *cst;
/* "nocst" module parameter forces the FADT fallback path */
if (nocst)
return -ENODEV;
current_count = 0;
status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
return -ENODEV;
}
cst = buffer.pointer;
/* There must be at least 2 elements */
if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
printk(KERN_ERR PREFIX "not enough elements in _CST\n");
status = -EFAULT;
goto end;
}
/* Element 0 is the declared state count; elements 1..count follow */
count = cst->package.elements[0].integer.value;
/* Validate number of power states. */
if (count < 1 || count != cst->package.count - 1) {
printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
status = -EFAULT;
goto end;
}
/* Tell driver that at least _CST is supported. */
pr->flags.has_cst = 1;
for (i = 1; i <= count; i++) {
union acpi_object *element;
union acpi_object *obj;
struct acpi_power_register *reg;
struct acpi_processor_cx cx;
memset(&cx, 0, sizeof(cx));
/* Each state is a 4-element sub-package:
 * [register buffer, type, latency, power] */
element = &(cst->package.elements[i]);
if (element->type != ACPI_TYPE_PACKAGE)
continue;
if (element->package.count != 4)
continue;
obj = &(element->package.elements[0]);
if (obj->type != ACPI_TYPE_BUFFER)
continue;
reg = (struct acpi_power_register *)obj->buffer.pointer;
if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
(reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
continue;
/* There should be an easy way to extract an integer... */
obj = &(element->package.elements[1]);
if (obj->type != ACPI_TYPE_INTEGER)
continue;
cx.type = obj->integer.value;
/*
* Some buggy BIOSes won't list C1 in _CST -
* Let acpi_processor_get_power_info_default() handle them later
*/
if (i == 1 && cx.type != ACPI_STATE_C1)
current_count++;
cx.address = reg->address;
cx.index = current_count + 1;
cx.entry_method = ACPI_CSTATE_SYSTEMIO;
if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
if (acpi_processor_ffh_cstate_probe
(pr->id, &cx, reg) == 0) {
cx.entry_method = ACPI_CSTATE_FFH;
} else if (cx.type == ACPI_STATE_C1) {
/*
* C1 is a special case where FIXED_HARDWARE
* can be handled in non-MWAIT way as well.
* In that case, save this _CST entry info.
* Otherwise, ignore this info and continue.
*/
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
} else {
continue;
}
if (cx.type == ACPI_STATE_C1 &&
(boot_option_idle_override == IDLE_NOMWAIT)) {
/*
* In most cases the C1 space_id obtained from
* _CST object is FIXED_HARDWARE access mode.
* But when the option of idle=halt is added,
* the entry_method type should be changed from
* CSTATE_FFH to CSTATE_HALT.
* When the option of idle=nomwait is added,
* the C1 entry_method type should be
* CSTATE_HALT.
*/
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
}
} else {
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
cx.address);
}
if (cx.type == ACPI_STATE_C1) {
cx.valid = 1;
}
obj = &(element->package.elements[2]);
if (obj->type != ACPI_TYPE_INTEGER)
continue;
cx.latency = obj->integer.value;
obj = &(element->package.elements[3]);
if (obj->type != ACPI_TYPE_INTEGER)
continue;
cx.power = obj->integer.value;
current_count++;
memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
/*
* We support total ACPI_PROCESSOR_MAX_POWER - 1
* (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
*/
if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
printk(KERN_WARNING
"Limiting number of power states to max (%d)\n",
ACPI_PROCESSOR_MAX_POWER);
printk(KERN_WARNING
"Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
break;
}
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
current_count));
/* Validate number of power states discovered */
if (current_count < 2)
status = -EFAULT;
end:
kfree(buffer.pointer);
return status;
}
/* Decide whether a discovered C3 state is actually usable on this
 * system (chipset errata, bus-master control, cache behavior); marks
 * cx->valid on success, otherwise returns leaving it invalid. */
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
/* Cached across CPUs: flags.bm_check/bm_control are assumed uniform */
static int bm_check_flag = -1;
static int bm_control_flag = -1;
if (!cx->address)
return;
/*
* PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
* DMA transfers are used by any ISA device to avoid livelock.
* Note that we could disable Type-F DMA (as recommended by
* the erratum), but this is known to disrupt certain ISA
* devices thus we take the conservative approach.
*/
/* NOTE(review): this "else if" pairs with the single-statement
 * "if (!cx->address) return;" above -- legal, but easy to misread */
else if (errata.piix4.fdma) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 not supported on PIIX4 with Type-F DMA\n"));
return;
}
/* All the logic here assumes flags.bm_check is same across all CPUs */
if (bm_check_flag == -1) {
/* Determine whether bm_check is needed based on CPU */
acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
bm_check_flag = pr->flags.bm_check;
bm_control_flag = pr->flags.bm_control;
} else {
pr->flags.bm_check = bm_check_flag;
pr->flags.bm_control = bm_control_flag;
}
if (pr->flags.bm_check) {
if (!pr->flags.bm_control) {
if (pr->flags.has_cst != 1) {
/* bus mastering control is necessary */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 support requires BM control\n"));
return;
} else {
/* Here we enter C3 without bus mastering */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 support without BM control\n"));
}
}
} else {
/*
* WBINVD should be set in fadt, for C3 state to be
* supported on when bm_check is not required.
*/
if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Cache invalidation should work properly"
" for C3 to be enabled on SMP systems\n"));
return;
}
}
/*
* Otherwise we've met all of our C3 requirements.
* Normalize the C3 latency to expidite policy. Enable
* checking of bus mastering status (bm_check) so we can
* use this in our C3 policy
*/
cx->valid = 1;
cx->latency_ticks = cx->latency;
/*
* On older chipsets, BM_RLD needs to be set
* in order for Bus Master activity to wake the
* system from C3. Newer chipsets handle DMA
* during C3 automatically and BM_RLD is a NOP.
* In either case, the proper way to
* handle BM_RLD is to set it and leave it set.
*/
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
return;
}
/*
 * Validate each discovered C-state and count the usable ones.
 * Also records the shallowest state needing LAPIC timer broadcast
 * and checks TSC behavior for every valid state.
 */
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
    unsigned int idx;
    unsigned int valid_count = 0;

    pr->power.timer_broadcast_on_state = INT_MAX;

    for (idx = 1; idx < ACPI_PROCESSOR_MAX_POWER && idx <= max_cstate; idx++) {
        struct acpi_processor_cx *cstate = &pr->power.states[idx];

        if (cstate->type == ACPI_STATE_C1) {
            /* C1 (halt) is always usable */
            cstate->valid = 1;
        } else if (cstate->type == ACPI_STATE_C2) {
            /* C2 needs an I/O address to enter it */
            if (cstate->address) {
                cstate->valid = 1;
                cstate->latency_ticks = cstate->latency; /* Normalize latency */
            }
        } else if (cstate->type == ACPI_STATE_C3) {
            /* C3 has additional chipset/BM requirements */
            acpi_processor_power_verify_c3(pr, cstate);
        }

        if (!cstate->valid)
            continue;

        lapic_timer_check_state(idx, pr, cstate);
        tsc_check_state(cstate->type);
        valid_count++;
    }
    lapic_timer_propagate_broadcast(pr);

    return valid_count;
}
/*
 * Gather the processor's C-state information (preferring _CST, falling
 * back to the FADT), verify the states, and mark the CPU as idle
 * manageable when a state of type C2 or deeper is available.
 */
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
    int err;
    unsigned int idx;

    /* NOTE: the idle thread may not be running while calling
     * this function */

    /* Zero initialize all the C-states info. */
    memset(pr->power.states, 0, sizeof(pr->power.states));

    err = acpi_processor_get_power_info_cst(pr);
    if (err == -ENODEV)
        err = acpi_processor_get_power_info_fadt(pr);
    if (err)
        return err;

    acpi_processor_get_power_info_default(pr);

    pr->power.count = acpi_processor_power_verify(pr);

    /*
     * if one state of type C2 or C3 is available, mark this
     * CPU as being "idle manageable"
     */
    for (idx = 1; idx < ACPI_PROCESSOR_MAX_POWER; idx++) {
        if (!pr->power.states[idx].valid)
            continue;
        pr->power.count = idx;
        if (pr->power.states[idx].type >= ACPI_STATE_C2)
            pr->flags.power = 1;
    }

    return 0;
}
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 *
 * Returns nonzero when bus-master activity is pending (and clears the
 * sticky BM_STS bit), zero otherwise or when checking is disabled.
 */
static int acpi_idle_bm_check(void)
{
    u32 bm_status = 0;

    if (bm_check_disable)
        return 0;

    acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
    if (bm_status) {
        /* BM_STS is write-1-to-clear */
        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
    } else if (errata.piix4.bmisx) {
        /*
         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
         * the true state of bus mastering activity; forcing us to
         * manually check the BMIDEA bit of each IDE channel.
         */
        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
            bm_status = 1;
    }
    return bm_status;
}
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
    /* Don't trace irqs off for idle */
    stop_critical_timings();

    switch (cx->entry_method) {
    case ACPI_CSTATE_FFH:
        /* Call into architectural FFH based C-state */
        acpi_processor_ffh_cstate_enter(cx);
        break;
    case ACPI_CSTATE_HALT:
        acpi_safe_halt();
        break;
    default:
        /* IO port based C-state */
        inb(cx->address);
        /* Dummy wait op - must do something useless after P_LVL2 read
           because chipsets cannot guarantee that STPCLK# signal
           gets asserted in time to freeze execution properly. */
        inl(acpi_gbl_FADT.xpm_timer_block.address);
        break;
    }

    start_critical_timings();
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 *
 * Returns the measured idle residency in microseconds, or 0 when the
 * processor object is missing or we are on the suspend path.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
    struct cpuidle_state *state)
{
    ktime_t kt1, kt2;
    s64 idle_time;
    struct acpi_processor *pr;
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

    pr = __this_cpu_read(processors);

    if (unlikely(!pr))
        return 0;

    local_irq_disable();

    /* Do not access any ACPI IO ports in suspend path */
    if (acpi_idle_suspend) {
        local_irq_enable();
        cpu_relax();
        return 0;
    }

    /* hand the local APIC timer over to broadcast mode while halted */
    lapic_timer_state_broadcast(pr, cx, 1);
    kt1 = ktime_get_real();
    acpi_idle_do_entry(cx);
    kt2 = ktime_get_real();
    idle_time = ktime_to_us(ktime_sub(kt2, kt1));

    local_irq_enable();
    cx->usage++;

    /* restore the local APIC timer */
    lapic_timer_state_broadcast(pr, cx, 0);
    return idle_time;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * Used for C2, and for C3 when bus-master checking is not required.
 * Returns the measured idle residency in microseconds.
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
    struct cpuidle_state *state)
{
    struct acpi_processor *pr;
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
    ktime_t kt1, kt2;
    s64 idle_time_ns;
    s64 idle_time;

    pr = __this_cpu_read(processors);

    if (unlikely(!pr))
        return 0;

    /* no ACPI IO port access in the suspend path; C1 entry handles it */
    if (acpi_idle_suspend)
        return(acpi_idle_enter_c1(dev, state));

    local_irq_disable();

    if (cx->entry_method != ACPI_CSTATE_FFH) {
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we test
         * NEED_RESCHED:
         */
        smp_mb();

        /* bail out if work became pending while clearing TS_POLLING */
        if (unlikely(need_resched())) {
            current_thread_info()->status |= TS_POLLING;
            local_irq_enable();
            return 0;
        }
    }

    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    lapic_timer_state_broadcast(pr, cx, 1);

    /* flush CPU caches before entering C3 */
    if (cx->type == ACPI_STATE_C3)
        ACPI_FLUSH_CPU_CACHE();

    kt1 = ktime_get_real();
    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    acpi_idle_do_entry(cx);
    kt2 = ktime_get_real();
    idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
    idle_time = idle_time_ns;
    do_div(idle_time, NSEC_PER_USEC);

    /* Tell the scheduler how much we idled: */
    sched_clock_idle_wakeup_event(idle_time_ns);

    local_irq_enable();
    if (cx->entry_method != ACPI_CSTATE_FFH)
        current_thread_info()->status |= TS_POLLING;

    cx->usage++;

    lapic_timer_state_broadcast(pr, cx, 0);
    cx->time += idle_time;
    return idle_time;
}
/* number of CPUs currently executing acpi_idle_enter_bm(); guarded by c3_lock */
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 *
 * Returns the measured idle residency in microseconds.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
    struct cpuidle_state *state)
{
    struct acpi_processor *pr;
    struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
    ktime_t kt1, kt2;
    s64 idle_time_ns;
    s64 idle_time;

    pr = __this_cpu_read(processors);

    if (unlikely(!pr))
        return 0;

    /* no ACPI IO port access in the suspend path; C1 entry handles it */
    if (acpi_idle_suspend)
        return(acpi_idle_enter_c1(dev, state));

    /* bus-master activity detected: use a shallower state instead of C3 */
    if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
        if (dev->safe_state) {
            dev->last_state = dev->safe_state;
            return dev->safe_state->enter(dev, dev->safe_state);
        } else {
            local_irq_disable();
            acpi_safe_halt();
            local_irq_enable();
            return 0;
        }
    }

    local_irq_disable();

    if (cx->entry_method != ACPI_CSTATE_FFH) {
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we test
         * NEED_RESCHED:
         */
        smp_mb();

        /* bail out if work became pending while clearing TS_POLLING */
        if (unlikely(need_resched())) {
            current_thread_info()->status |= TS_POLLING;
            local_irq_enable();
            return 0;
        }
    }

    acpi_unlazy_tlb(smp_processor_id());

    /* Tell the scheduler that we are going deep-idle: */
    sched_clock_idle_sleep_event();
    /*
     * Must be done before busmaster disable as we might need to
     * access HPET !
     */
    lapic_timer_state_broadcast(pr, cx, 1);

    kt1 = ktime_get_real();
    /*
     * disable bus master
     * bm_check implies we need ARB_DIS
     * !bm_check implies we need cache flush
     * bm_control implies whether we can do ARB_DIS
     *
     * That leaves a case where bm_check is set and bm_control is
     * not set. In that case we cannot do much, we enter C3
     * without doing anything.
     */
    if (pr->flags.bm_check && pr->flags.bm_control) {
        spin_lock(&c3_lock);
        c3_cpu_count++;
        /* Disable bus master arbitration when all CPUs are in C3 */
        if (c3_cpu_count == num_online_cpus())
            acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
        spin_unlock(&c3_lock);
    } else if (!pr->flags.bm_check) {
        ACPI_FLUSH_CPU_CACHE();
    }

    acpi_idle_do_entry(cx);

    /* Re-enable bus master arbitration */
    if (pr->flags.bm_check && pr->flags.bm_control) {
        spin_lock(&c3_lock);
        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
        c3_cpu_count--;
        spin_unlock(&c3_lock);
    }
    kt2 = ktime_get_real();
    idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
    idle_time = idle_time_ns;
    do_div(idle_time, NSEC_PER_USEC);

    /* Tell the scheduler how much we idled: */
    sched_clock_idle_wakeup_event(idle_time_ns);

    local_irq_enable();
    if (cx->entry_method != ACPI_CSTATE_FFH)
        current_thread_info()->status |= TS_POLLING;

    cx->usage++;

    lapic_timer_state_broadcast(pr, cx, 0);
    cx->time += idle_time;
    return idle_time;
}
/*
 * cpuidle driver descriptor for the ACPI idle implementation; the
 * per-CPU state tables live in each processor's cpuidle device
 * (pr->power.dev) and are filled in by acpi_processor_setup_cpuidle().
 */
struct cpuidle_driver acpi_idle_driver = {
    .name = "acpi_idle",
    .owner = THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 *
 * Translates the verified ACPI C-states in pr->power.states[] into
 * cpuidle states on pr->power.dev and selects the matching entry
 * handler for each.  Returns 0 on success, -EINVAL when power setup
 * is not done or no state could be registered.
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
    int i, count = CPUIDLE_DRIVER_STATE_START;
    struct acpi_processor_cx *cx;
    struct cpuidle_state *state;
    struct cpuidle_device *dev = &pr->power.dev;

    if (!pr->flags.power_setup_done)
        return -EINVAL;

    if (pr->flags.power == 0) {
        return -EINVAL;
    }

    dev->cpu = pr->id;

    /* start from a clean cpuidle state table */
    for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
        dev->states[i].name[0] = '\0';
        dev->states[i].desc[0] = '\0';
    }

    if (max_cstate == 0)
        max_cstate = 1;

    for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
        cx = &pr->power.states[i];
        state = &dev->states[count];

        if (!cx->valid)
            continue;

#ifdef CONFIG_HOTPLUG_CPU
        /* on SMP without _CST, deeper-than-C1 states are only usable
         * when the FADT advertises MP-safe C2 */
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
            continue;
#endif
        cpuidle_set_statedata(state, cx);

        snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
        strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
        state->exit_latency = cx->latency;
        state->target_residency = cx->latency * latency_factor;
        state->flags = 0;
        switch (cx->type) {
        case ACPI_STATE_C1:
            /* residency timing is only trustworthy with FFH entry */
            if (cx->entry_method == ACPI_CSTATE_FFH)
                state->flags |= CPUIDLE_FLAG_TIME_VALID;
            state->enter = acpi_idle_enter_c1;
            dev->safe_state = state;
            break;

        case ACPI_STATE_C2:
            state->flags |= CPUIDLE_FLAG_TIME_VALID;
            state->enter = acpi_idle_enter_simple;
            dev->safe_state = state;
            break;

        case ACPI_STATE_C3:
            state->flags |= CPUIDLE_FLAG_TIME_VALID;
            /* bus-master handling only needed when bm_check is set */
            state->enter = pr->flags.bm_check ?
                acpi_idle_enter_bm :
                acpi_idle_enter_simple;
            break;
        }

        count++;
        if (count == CPUIDLE_STATE_MAX)
            break;
    }

    dev->state_count = count;

    if (!count)
        return -EINVAL;

    return 0;
}
/**
 * acpi_processor_cst_has_changed - re-evaluate the processor's C-states
 * @pr: the ACPI processor
 *
 * Re-reads the power information and re-registers the cpuidle states
 * while the cpuidle framework is paused.  Returns 0 on success or the
 * cpuidle_enable_device() result, -EINVAL/-ENODEV on precondition
 * failures.
 */
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
    int rc = 0;

    if (disabled_by_idle_boot_param())
        return 0;

    if (!pr)
        return -EINVAL;

    if (nocst)
        return -ENODEV;

    if (!pr->flags.power_setup_done)
        return -ENODEV;

    cpuidle_pause_and_lock();
    cpuidle_disable_device(&pr->power.dev);
    acpi_processor_get_power_info(pr);
    if (pr->flags.power) {
        acpi_processor_setup_cpuidle(pr);
        rc = cpuidle_enable_device(&pr->power.dev);
    }
    cpuidle_resume_and_unlock();

    return rc;
}
/**
 * acpi_processor_power_init - per-processor power/idle initialization
 * @pr: the ACPI processor
 * @device: the associated ACPI device
 *
 * Gathers and verifies C-state information, notifies the BIOS of _CST
 * support where applicable, and registers a cpuidle device when any
 * usable deep C-state exists.  Returns 0 on success, -EINVAL for a
 * missing processor, -EIO when cpuidle registration fails.
 */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
    struct acpi_device *device)
{
    acpi_status status = 0;
    static int first_run;   /* system-wide setup below runs only once */

    if (disabled_by_idle_boot_param())
        return 0;

    if (!first_run) {
        dmi_check_system(processor_power_dmi_table);
        max_cstate = acpi_processor_cstate_check(max_cstate);
        if (max_cstate < ACPI_C_STATES_MAX)
            printk(KERN_NOTICE
                   "ACPI: processor limited to max C-state %d\n",
                   max_cstate);
        first_run++;
    }

    if (!pr)
        return -EINVAL;

    /* tell the BIOS, via the FADT SMI command port, that the OS
     * supports _CST-based C-state control */
    if (acpi_gbl_FADT.cst_control && !nocst) {
        status =
            acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                    "Notifying BIOS of _CST ability failed"));
        }
    }

    acpi_processor_get_power_info(pr);
    pr->flags.power_setup_done = 1;

    /*
     * Install the idle handler if processor power management is supported.
     * Note that the previously set idle handler will be used on
     * platforms that only support C1.
     */
    if (pr->flags.power) {
        acpi_processor_setup_cpuidle(pr);
        if (cpuidle_register_device(&pr->power.dev))
            return -EIO;
    }
    return 0;
}
/**
 * acpi_processor_power_exit - tear down per-processor idle support
 * @pr: the ACPI processor
 * @device: the associated ACPI device
 *
 * Unregisters the cpuidle device and marks power setup as undone.
 * Always returns 0.
 */
int acpi_processor_power_exit(struct acpi_processor *pr,
    struct acpi_device *device)
{
    if (!disabled_by_idle_boot_param()) {
        cpuidle_unregister_device(&pr->power.dev);
        pr->flags.power_setup_done = 0;
    }

    return 0;
}
/* arch/arm/mach-msm/smp2p_gpio_test.c
*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/debugfs.h>
#include <linux/completion.h>
#include <linux/irq.h>
#include <linux/bitmap.h>
#include "smp2p_private.h"
#include "smp2p_test_common.h"
/* Interrupt callback data */
struct gpio_info {
    int gpio_base_id;   /* GPIO number of bit 0 of the entry */
    int irq_base_id;    /* virq mapped to bit 0 (gpio_to_irq of base) */
    bool initialized;   /* cb_completion has been init_completion()'d */
    struct completion cb_completion;    /* signalled by the test ISR */
    int cb_count;       /* number of interrupts received so far */
    /* one bit per entry bit whose interrupt has fired */
    DECLARE_BITMAP(triggered_irqs, SMP2P_BITS_PER_ENTRY);
};

/* GPIO Inbound/Outbound callback info */
struct gpio_inout {
    struct gpio_info in;    /* inbound entry (read-only on this side) */
    struct gpio_info out;   /* outbound entry (written by this side) */
};

/* per-remote-processor test state, indexed by the SMP2P_*_PROC ids */
static struct gpio_inout gpio_info[SMP2P_NUM_PROCS];
/**
 * cb_data_reset - Init/reset the interrupt-callback bookkeeping.
 *
 * @info: Pointer to callback data (NULL is tolerated)
 *
 * The completion object is initialized exactly once; on later calls it
 * is merely re-armed.  The interrupt counter and triggered-IRQ bitmap
 * are cleared on every call.
 */
static void cb_data_reset(struct gpio_info *info)
{
    int bit;

    if (!info)
        return;

    if (!info->initialized) {
        init_completion(&info->cb_completion);
        info->initialized = true;
    }

    info->cb_count = 0;
    for (bit = 0; bit < SMP2P_BITS_PER_ENTRY; ++bit)
        clear_bit(bit, info->triggered_irqs);
    INIT_COMPLETION(info->cb_completion);
}
/**
 * smp2p_gpio_test_probe - resolve GPIO/IRQ base ids for one test node.
 * @pdev: platform device for a qcom,smp2pgpio_test_* device-tree node
 *
 * Maps the node name onto the matching gpio_info[] slot and records the
 * first pin's GPIO number and its virq as the base ids for that entry.
 *
 * NOTE: This does a string-lookup of the GPIO pin name and doesn't
 * actually directly link to the SMP2P GPIO driver since all
 * GPIO/Interrupt access must be through standard
 * Linux GPIO / Interrupt APIs.
 */
static int __devinit smp2p_gpio_test_probe(struct platform_device *pdev)
{
    static const struct {
        const char *node_name;
        struct gpio_info *info;
    } node_map[] = {
        {"qcom,smp2pgpio_test_smp2p_1_in",
            &gpio_info[SMP2P_MODEM_PROC].in},
        {"qcom,smp2pgpio_test_smp2p_1_out",
            &gpio_info[SMP2P_MODEM_PROC].out},
        {"qcom,smp2pgpio_test_smp2p_2_in",
            &gpio_info[SMP2P_AUDIO_PROC].in},
        {"qcom,smp2pgpio_test_smp2p_2_out",
            &gpio_info[SMP2P_AUDIO_PROC].out},
        {"qcom,smp2pgpio_test_smp2p_4_in",
            &gpio_info[SMP2P_WIRELESS_PROC].in},
        {"qcom,smp2pgpio_test_smp2p_4_out",
            &gpio_info[SMP2P_WIRELESS_PROC].out},
        {"qcom,smp2pgpio_test_smp2p_7_in",
            &gpio_info[SMP2P_REMOTE_MOCK_PROC].in},
        {"qcom,smp2pgpio_test_smp2p_7_out",
            &gpio_info[SMP2P_REMOTE_MOCK_PROC].out},
    };
    struct device_node *node = pdev->dev.of_node;
    struct gpio_info *gpio_info_ptr = NULL;
    int pin_count;
    int i;

    for (i = 0; i < ARRAY_SIZE(node_map); ++i) {
        if (strcmp(node_map[i].node_name, node->name) == 0) {
            gpio_info_ptr = node_map[i].info;
            break;
        }
    }

    if (!gpio_info_ptr) {
        pr_err("%s: unable to match device type '%s'\n",
                __func__, node->name);
        return -ENODEV;
    }

    /* retrieve the GPIO and interrupt ID's */
    pin_count = of_gpio_count(node);
    if (pin_count) {
        /*
         * Instead of looping through all 32-bits, we can just get the
         * first pin to get the base IDs. This saves on the verbosity
         * of the device tree nodes as well.
         */
        int id = of_get_gpio(node, 0);

        gpio_info_ptr->gpio_base_id = id;
        gpio_info_ptr->irq_base_id = gpio_to_irq(id);
    }
    return 0;
}
/*
 * NOTE: Instead of match table and device driver, you may be able to just
 * call of_find_compatible_node() in your init function.
 */
static struct of_device_id msm_smp2p_match_table[] __devinitdata = {
    /* modem */
    {.compatible = "qcom,smp2pgpio_test_smp2p_1_out", },
    {.compatible = "qcom,smp2pgpio_test_smp2p_1_in", },

    /* audio (adsp) */
    {.compatible = "qcom,smp2pgpio_test_smp2p_2_out", },
    {.compatible = "qcom,smp2pgpio_test_smp2p_2_in", },

    /* wcnss */
    {.compatible = "qcom,smp2pgpio_test_smp2p_4_out", },
    {.compatible = "qcom,smp2pgpio_test_smp2p_4_in", },

    /* mock loopback */
    {.compatible = "qcom,smp2pgpio_test_smp2p_7_out", },
    {.compatible = "qcom,smp2pgpio_test_smp2p_7_in", },
    {},     /* sentinel */
};

/* platform driver whose probe resolves GPIO/IRQ base ids per DT node */
static struct platform_driver smp2p_gpio_driver = {
    .probe = smp2p_gpio_test_probe,
    .driver = {
        .name = "smp2pgpio_test",
        .owner = THIS_MODULE,
        .of_match_table = msm_smp2p_match_table,
    },
};
/**
 * smp2p_ut_local_gpio_out - Verify outbound functionality.
 *
 * @s: pointer to output file
 *
 * Toggles each outbound GPIO bit against the remote mock processor and
 * checks that every write raises exactly one remote interrupt and that
 * the pin reads back with the value just written.
 */
static void smp2p_ut_local_gpio_out(struct seq_file *s)
{
    int failed = 0;
    struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
    int ret;
    int id;
    struct msm_smp2p_remote_mock *mock;

    seq_printf(s, "Running %s\n", __func__);
    do {
        /* initialize mock edge */
        ret = smp2p_reset_mock_edge();
        UT_ASSERT_INT(ret, ==, 0);

        mock = msm_smp2p_get_remote_mock();
        UT_ASSERT_PTR(mock, !=, NULL);

        mock->rx_interrupt_count = 0;
        memset(&mock->remote_item, 0,
            sizeof(struct smp2p_smem_item));
        /* build a valid remote SMP2P item with one "smp2p" entry */
        smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
                SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
                0, 1);
        strlcpy(mock->remote_item.entries[0].name, "smp2p",
            SMP2P_MAX_ENTRY_NAME);
        SMP2P_SET_ENT_VALID(
            mock->remote_item.header.valid_total_ent, 1);
        msm_smp2p_set_remote_mock_exists(true);
        mock->tx_interrupt();

        /* open GPIO entry */
        smp2p_gpio_open_test_entry("smp2p",
                SMP2P_REMOTE_MOCK_PROC, true);

        /* verify set/get functions */
        UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
        for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
            int pin = cb_info->gpio_base_id + id;

            mock->rx_interrupt_count = 0;
            gpio_set_value(pin, 1);
            /* each state change must interrupt the remote side once */
            UT_ASSERT_INT(1, ==, mock->rx_interrupt_count);
            UT_ASSERT_INT(1, ==, gpio_get_value(pin));

            gpio_set_value(pin, 0);
            UT_ASSERT_INT(2, ==, mock->rx_interrupt_count);
            UT_ASSERT_INT(0, ==, gpio_get_value(pin));
        }
        if (failed)
            break;

        seq_printf(s, "\tOK\n");
    } while (0);

    if (failed) {
        pr_err("%s: Failed\n", __func__);
        seq_printf(s, "\tFailed\n");
    }

    /* close the test entry again */
    smp2p_gpio_open_test_entry("smp2p",
            SMP2P_REMOTE_MOCK_PROC, false);
}
/**
 * smp2p_gpio_irq - Interrupt handler for inbound entries.
 *
 * @irq: Virtual IRQ being triggered
 * @data: Cookie data (struct gpio_info * in this case)
 * @returns: IRQ_HANDLED
 *
 * Records which entry bit fired, bumps the callback counter, and wakes
 * any test waiting on the completion.
 */
static irqreturn_t smp2p_gpio_irq(int irq, void *data)
{
    struct gpio_info *info = data;
    int bit;

    if (!info) {
        pr_err("%s: gpio_ptr is NULL for irq %d\n", __func__, irq);
        return IRQ_HANDLED;
    }

    bit = irq - info->irq_base_id;
    if (bit < 0 || bit >= SMP2P_BITS_PER_ENTRY)
        pr_err("%s: invalid irq offset base %d; irq %d\n",
            __func__, info->irq_base_id, irq);
    else
        set_bit(bit, info->triggered_irqs);

    ++info->cb_count;
    complete(&info->cb_completion);
    return IRQ_HANDLED;
}
/**
 * smp2p_ut_local_gpio_in - Verify inbound functionality.
 *
 * @s: pointer to output file
 *
 * Uses the remote mock processor to drive the inbound entry and checks
 * that the pins are read-only from this side and that rising, falling,
 * and both-edge interrupt configurations fire exactly as expected.
 */
static void smp2p_ut_local_gpio_in(struct seq_file *s)
{
    int failed = 0;
    struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
    int id;
    int ret;
    int virq;
    struct msm_smp2p_remote_mock *mock;

    seq_printf(s, "Running %s\n", __func__);

    cb_data_reset(cb_info);
    do {
        /* initialize mock edge */
        ret = smp2p_reset_mock_edge();
        UT_ASSERT_INT(ret, ==, 0);

        mock = msm_smp2p_get_remote_mock();
        UT_ASSERT_PTR(mock, !=, NULL);

        mock->rx_interrupt_count = 0;
        memset(&mock->remote_item, 0,
            sizeof(struct smp2p_smem_item));
        /* build a valid remote SMP2P item with one "smp2p" entry */
        smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
                SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
                0, 1);
        strlcpy(mock->remote_item.entries[0].name, "smp2p",
            SMP2P_MAX_ENTRY_NAME);
        SMP2P_SET_ENT_VALID(
            mock->remote_item.header.valid_total_ent, 1);
        msm_smp2p_set_remote_mock_exists(true);
        mock->tx_interrupt();

        smp2p_gpio_open_test_entry("smp2p",
                SMP2P_REMOTE_MOCK_PROC, true);

        /* verify set/get functions locally */
        UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
        for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
            int pin;
            int current_value;

            /* verify pin value cannot be set */
            pin = cb_info->gpio_base_id + id;
            current_value = gpio_get_value(pin);

            gpio_set_value(pin, 0);
            UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
            gpio_set_value(pin, 1);
            UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));

            /* verify no interrupts */
            UT_ASSERT_INT(0, ==, cb_info->cb_count);
        }
        if (failed)
            break;

        /* register for interrupts */
        UT_ASSERT_INT(0, <, cb_info->irq_base_id);
        for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
            virq = cb_info->irq_base_id + id;
            /* NOTE(review): this looks intended as a non-NULL check
             * on irq_to_desc(virq); as written, 0 > (unsigned) can
             * never hold -- confirm against the UT_ASSERT_INT macro
             * semantics in smp2p_test_common.h. */
            UT_ASSERT_INT(0, >, (unsigned int)irq_to_desc(virq));
            ret = request_irq(virq,
                smp2p_gpio_irq, IRQF_TRIGGER_RISING,
                "smp2p_test", cb_info);
            UT_ASSERT_INT(0, ==, ret);
        }
        if (failed)
            break;

        /* verify both rising and falling edge interrupts */
        for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
            virq = cb_info->irq_base_id + id;
            irq_set_irq_type(virq, IRQ_TYPE_EDGE_BOTH);
            cb_data_reset(cb_info);

            /* verify rising-edge interrupt */
            mock->remote_item.entries[0].entry = 1 << id;
            mock->tx_interrupt();
            UT_ASSERT_INT(cb_info->cb_count, ==, 1);
            UT_ASSERT_INT(0, <,
                test_bit(id, cb_info->triggered_irqs));

            /* verify falling-edge interrupt */
            mock->remote_item.entries[0].entry = 0;
            mock->tx_interrupt();
            UT_ASSERT_INT(cb_info->cb_count, ==, 2);
            UT_ASSERT_INT(0, <,
                test_bit(id, cb_info->triggered_irqs));
        }
        if (failed)
            break;

        /* verify rising-edge interrupts */
        for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
            virq = cb_info->irq_base_id + id;
            irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
            cb_data_reset(cb_info);

            /* verify only rising-edge interrupt is triggered */
            mock->remote_item.entries[0].entry = 1 << id;
            mock->tx_interrupt();
            UT_ASSERT_INT(cb_info->cb_count, ==, 1);
            UT_ASSERT_INT(0, <,
                test_bit(id, cb_info->triggered_irqs));

            /* falling edge must not fire: count stays at 1 */
            mock->remote_item.entries[0].entry = 0;
            mock->tx_interrupt();
            UT_ASSERT_INT(cb_info->cb_count, ==, 1);
            UT_ASSERT_INT(0, <,
                test_bit(id, cb_info->triggered_irqs));
        }
        if (failed)
            break;

        /* verify falling-edge interrupts */
        for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
            virq = cb_info->irq_base_id + id;
            irq_set_irq_type(virq, IRQ_TYPE_EDGE_FALLING);
            cb_data_reset(cb_info);

            /* rising edge must not fire in falling-only mode */
            mock->remote_item.entries[0].entry = 1 << id;
            mock->tx_interrupt();
            UT_ASSERT_INT(cb_info->cb_count, ==, 0);
            UT_ASSERT_INT(0, ==,
                test_bit(id, cb_info->triggered_irqs));

            /* verify only falling-edge interrupt is triggered */
            mock->remote_item.entries[0].entry = 0;
            mock->tx_interrupt();
            UT_ASSERT_INT(cb_info->cb_count, ==, 1);
            UT_ASSERT_INT(0, <,
                test_bit(id, cb_info->triggered_irqs));
        }
        if (failed)
            break;

        seq_printf(s, "\tOK\n");
    } while (0);

    if (failed) {
        pr_err("%s: Failed\n", __func__);
        seq_printf(s, "\tFailed\n");
    }

    /* unregister for interrupts */
    if (cb_info->irq_base_id) {
        for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
            free_irq(cb_info->irq_base_id + id, cb_info);
    }

    smp2p_gpio_open_test_entry("smp2p",
            SMP2P_REMOTE_MOCK_PROC, false);
}
/**
* smp2p_ut_local_gpio_in_update_open - Verify combined open/update.
*
* @s: pointer to output file
*
* If the remote side updates the SMP2P bits and sends before negotiation is
* complete, then the UPDATE event will have to be delayed until negotiation is
* complete. This should result in both the OPEN and UPDATE events coming in
* right after each other and the behavior should be transparent to the clients
* of SMP2P GPIO.
*/
static void smp2p_ut_local_gpio_in_update_open(struct seq_file *s)
{
int failed = 0;
struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
int id;
int ret;
int virq;
struct msm_smp2p_remote_mock *mock;
seq_printf(s, "Running %s\n", __func__);
cb_data_reset(cb_info);
do {
/* initialize mock edge */
ret = smp2p_reset_mock_edge();
UT_ASSERT_INT(ret, ==, 0);
mock = msm_smp2p_get_remote_mock();
UT_ASSERT_PTR(mock, !=, NULL);
mock->rx_interrupt_count = 0;
memset(&mock->remote_item, 0,
sizeof(struct smp2p_smem_item));
smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
0, 1);
strlcpy(mock->remote_item.entries[0].name, "smp2p",
SMP2P_MAX_ENTRY_NAME);
SMP2P_SET_ENT_VALID(
mock->remote_item.header.valid_total_ent, 1);
/* register for interrupts */
smp2p_gpio_open_test_entry("smp2p",
SMP2P_REMOTE_MOCK_PROC, true);
UT_ASSERT_INT(0, <, cb_info->irq_base_id);
for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
virq = cb_info->irq_base_id + id;
UT_ASSERT_INT(0, >, (unsigned int)irq_to_desc(virq));
ret = request_irq(virq,
smp2p_gpio_irq, IRQ_TYPE_EDGE_BOTH,
"smp2p_test", cb_info);
UT_ASSERT_INT(0, ==, ret);
}
if (failed)
break;
/* update the state value and complete negotiation */
mock->remote_item.entries[0].entry = 0xDEADDEAD;
msm_smp2p_set_remote_mock_exists(true);
mock->tx_interrupt();
/* verify delayed state updates were processed */
for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
virq = cb_info->irq_base_id + id;
UT_ASSERT_INT(cb_info->cb_count, >, 0);
if (0x1 & (0xDEADDEAD >> id)) {
/* rising edge should have been triggered */
if (!test_bit(id, cb_info->triggered_irqs)) {
seq_printf(s,
"%s:%d bit %d clear, expected set\n",
__func__, __LINE__, id);
failed = 1;
break;
}
} else {
/* edge should not have been triggered */
if (test_bit(id, cb_info->triggered_irqs)) {
seq_printf(s,
"%s:%d bit %d set, expected clear\n",
__func__, __LINE__, id);
failed = 1;
break;
}
}
}
if (failed)
break;
seq_printf(s, "\tOK\n");
} while (0);
if (failed) {
pr_err("%s: Failed\n", __func__);
seq_printf(s, "\tFailed\n");
}
/* unregister for interrupts */
if (cb_info->irq_base_id) {
for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
free_irq(cb_info->irq_base_id + id, cb_info);
}
smp2p_gpio_open_test_entry("smp2p",
SMP2P_REMOTE_MOCK_PROC, false);
}
/**
 * smp2p_gpio_write_bits - writes value to each GPIO pin specified in mask.
 *
 * @gpio: gpio test structure
 * @mask: 1 = write gpio_value to this GPIO pin
 * @gpio_value: value to write to GPIO pin
 */
static void smp2p_gpio_write_bits(struct gpio_info *gpio, uint32_t mask,
    int gpio_value)
{
    int bit;

    for (bit = 0; bit < SMP2P_BITS_PER_ENTRY; ++bit) {
        if (mask & (1U << bit))
            gpio_set_value(gpio->gpio_base_id + bit, gpio_value);
    }
}

/* drive every pin selected by @mask high */
static void smp2p_gpio_set_bits(struct gpio_info *gpio, uint32_t mask)
{
    smp2p_gpio_write_bits(gpio, mask, 1);
}

/* drive every pin selected by @mask low */
static void smp2p_gpio_clr_bits(struct gpio_info *gpio, uint32_t mask)
{
    smp2p_gpio_write_bits(gpio, mask, 0);
}
/**
 * smp2p_gpio_get_value - reads entire 32-bits of GPIO
 *
 * @gpio: gpio structure
 * @returns: 32 bit value of GPIO pins
 */
static uint32_t smp2p_gpio_get_value(struct gpio_info *gpio)
{
    uint32_t word = 0;
    int bit;

    for (bit = 0; bit < SMP2P_BITS_PER_ENTRY; ++bit) {
        if (gpio_get_value(gpio->gpio_base_id + bit))
            word |= 1U << bit;
    }
    return word;
}
/**
 * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
 *
 * @s: pointer to output file
 * @remote_pid: Remote processor to test
 * @name: Name of the test for reporting
 *
 * This test verifies inbound/outbound functionality for the remote processor:
 * it sends loopback ECHO commands over the outbound entry and checks the
 * echoed value (and interrupt count) coming back on the inbound entry.
 */
static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid,
    const char *name)
{
    int failed = 0;
    uint32_t request;
    uint32_t response;
    struct gpio_info *cb_in;
    struct gpio_info *cb_out;
    int id;
    int ret;

    seq_printf(s, "Running %s for '%s' remote pid %d\n",
           __func__, smp2p_pid_to_name(remote_pid), remote_pid);

    cb_in = &gpio_info[remote_pid].in;
    cb_out = &gpio_info[remote_pid].out;
    cb_data_reset(cb_in);
    cb_data_reset(cb_out);
    do {
        /* open test entries */
        msm_smp2p_deinit_rmt_lpb_proc(remote_pid);
        smp2p_gpio_open_test_entry("smp2p", remote_pid, true);

        /* register for interrupts */
        UT_ASSERT_INT(0, <, cb_in->gpio_base_id);
        UT_ASSERT_INT(0, <, cb_in->irq_base_id);
        for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
            int virq = cb_in->irq_base_id + id;
            /* NOTE(review): looks intended as a non-NULL check on
             * irq_to_desc(); 0 > (unsigned) can never hold --
             * confirm against UT_ASSERT_INT macro semantics. */
            UT_ASSERT_INT(0, >, (unsigned int)irq_to_desc(virq));
            ret = request_irq(virq,
                smp2p_gpio_irq,
                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                "smp2p_test", cb_in);
            UT_ASSERT_INT(0, ==, ret);
        }
        if (failed)
            break;

        /* write echo of data value 0 */
        UT_ASSERT_INT(0, <, cb_out->gpio_base_id);
        request = 0x0;
        SMP2P_SET_RMT_CMD_TYPE(request, 1);
        SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
        SMP2P_SET_RMT_DATA(request, 0x0);

        /* hold the "ignore" bits while staging the command bits */
        smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
        smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
        smp2p_gpio_set_bits(cb_out, request);

        UT_ASSERT_INT(cb_in->cb_count, ==, 0);
        /* dropping the ignore bits triggers the remote loopback */
        smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);

        /* verify response */
        do {
            /* wait for up to 32 changes */
            if (wait_for_completion_timeout(
                    &cb_in->cb_completion, HZ / 2) == 0)
                break;
            INIT_COMPLETION(cb_in->cb_completion);
        } while (cb_in->cb_count < 32);
        UT_ASSERT_INT(cb_in->cb_count, >, 0);
        response = smp2p_gpio_get_value(cb_in);
        /* the echo reply carries the request with the cmd-type bit clear */
        SMP2P_SET_RMT_CMD_TYPE(request, 0);
        UT_ASSERT_HEX(request, ==, response);

        /* write echo of data value of all 1's */
        request = 0x0;
        SMP2P_SET_RMT_CMD_TYPE(request, 1);
        SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
        SMP2P_SET_RMT_DATA(request, ~0);

        smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
        cb_data_reset(cb_in);
        smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
        smp2p_gpio_set_bits(cb_out, request);

        UT_ASSERT_INT(cb_in->cb_count, ==, 0);
        smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);

        /* verify response including 24 interrupts */
        do {
            UT_ASSERT_INT(
                (int)wait_for_completion_timeout(
                    &cb_in->cb_completion, HZ / 2),
                >, 0);
            INIT_COMPLETION(cb_in->cb_completion);
        } while (cb_in->cb_count < 24);
        response = smp2p_gpio_get_value(cb_in);
        SMP2P_SET_RMT_CMD_TYPE(request, 0);
        UT_ASSERT_HEX(request, ==, response);
        UT_ASSERT_INT(24, ==, cb_in->cb_count);

        seq_printf(s, "\tOK\n");
    } while (0);

    if (failed) {
        pr_err("%s: Failed\n", name);
        seq_printf(s, "\tFailed\n");
    }

    /* unregister for interrupts */
    if (cb_in->irq_base_id) {
        for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
            free_irq(cb_in->irq_base_id + id, cb_in);
    }

    smp2p_gpio_open_test_entry("smp2p", remote_pid, false);
    msm_smp2p_init_rmt_lpb_proc(remote_pid);
}
/**
 * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
 *
 * @s: pointer to output file
 *
 * Runs the inbound/outbound core test once for every remote processor
 * that has an interrupt configuration.
 */
static void smp2p_ut_remote_inout(struct seq_file *s)
{
    struct smp2p_interrupt_config *int_cfg = smp2p_get_interrupt_config();
    int pid;

    if (!int_cfg) {
        seq_printf(s, "Remote processor config unavailable\n");
        return;
    }

    for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
        if (int_cfg[pid].is_configured)
            smp2p_ut_remote_inout_core(s, pid, __func__);
}
/*
 * Register the test GPIO platform driver and the debugfs unit-test
 * entries.  Always returns 0 (initcall convention); a failed
 * platform_driver_register() is deliberately ignored here.
 */
static int __init smp2p_debugfs_init(void)
{
    /* register GPIO pins */
    (void)platform_driver_register(&smp2p_gpio_driver);

    /*
     * Add Unit Test entries.
     *
     * The idea with unit tests is that you can run all of them
     * from ADB shell by doing:
     *  adb shell
     *  cat ut*
     *
     * And if particular tests fail, you can then repeatedly run the
     * failing tests as you debug and resolve the failing test.
     */
    smp2p_debug_create("ut_local_gpio_out", smp2p_ut_local_gpio_out);
    smp2p_debug_create("ut_local_gpio_in", smp2p_ut_local_gpio_in);
    smp2p_debug_create("ut_local_gpio_in_update_open",
        smp2p_ut_local_gpio_in_update_open);
    smp2p_debug_create("ut_remote_gpio_inout", smp2p_ut_remote_inout);
    return 0;
}
late_initcall(smp2p_debugfs_init);
/*
tda18271-common.c - driver for the Philips / NXP TDA18271 silicon tuner
Copyright (C) 2007, 2008 Michael Krufky <mkrufky@linuxtv.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "tda18271-priv.h"
/**
 * tda18271_i2c_gate_ctrl - open or close the frontend's i2c gate
 * @fe: the dvb frontend that owns this tuner
 * @enable: nonzero to open the gate, zero to close it
 *
 * Selects the analog or digital gate-control callback based on the
 * configured gate (or, in AUTO mode, on the current tuner mode) and
 * forwards @enable to it.  Returns the callback's result, 0 when no
 * callback is set, or -EINVAL for an unknown gate.
 *
 * Fix: removed the stray semicolon that followed the function's
 * closing brace (rejected by -pedantic, flagged by checkpatch).
 */
static int tda18271_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
    struct tda18271_priv *priv = fe->tuner_priv;
    enum tda18271_i2c_gate gate;
    int ret = 0;

    switch (priv->gate) {
    case TDA18271_GATE_DIGITAL:
    case TDA18271_GATE_ANALOG:
        gate = priv->gate;
        break;
    case TDA18271_GATE_AUTO:
    default:
        /* AUTO: derive the gate from the current tuning mode */
        switch (priv->mode) {
        case TDA18271_DIGITAL:
            gate = TDA18271_GATE_DIGITAL;
            break;
        case TDA18271_ANALOG:
        default:
            gate = TDA18271_GATE_ANALOG;
            break;
        }
    }

    switch (gate) {
    case TDA18271_GATE_ANALOG:
        if (fe->ops.analog_ops.i2c_gate_ctrl)
            ret = fe->ops.analog_ops.i2c_gate_ctrl(fe, enable);
        break;
    case TDA18271_GATE_DIGITAL:
        if (fe->ops.i2c_gate_ctrl)
            ret = fe->ops.i2c_gate_ctrl(fe, enable);
        break;
    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}
/*---------------------------------------------------------------------*/
/*
 * tda18271_dump_regs - print the driver's cached (shadow) register values.
 * @fe:       frontend whose register shadow to dump
 * @extended: NOTE(review): this parameter is never read; whether the
 *            extended bytes are dumped is gated solely by the DBG_ADV
 *            debug flag below.  Confirm against callers whether it can
 *            be dropped or should gate the extended dump.
 *
 * Dumps the values last written to / read from the chip, not a fresh
 * hardware read-back.
 */
static void tda18271_dump_regs(struct dvb_frontend *fe, int extended)
{
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	tda_reg("=== TDA18271 REG DUMP ===\n");
	tda_reg("ID_BYTE = 0x%02x\n", 0xff & regs[R_ID]);
	tda_reg("THERMO_BYTE = 0x%02x\n", 0xff & regs[R_TM]);
	tda_reg("POWER_LEVEL_BYTE = 0x%02x\n", 0xff & regs[R_PL]);
	tda_reg("EASY_PROG_BYTE_1 = 0x%02x\n", 0xff & regs[R_EP1]);
	tda_reg("EASY_PROG_BYTE_2 = 0x%02x\n", 0xff & regs[R_EP2]);
	tda_reg("EASY_PROG_BYTE_3 = 0x%02x\n", 0xff & regs[R_EP3]);
	tda_reg("EASY_PROG_BYTE_4 = 0x%02x\n", 0xff & regs[R_EP4]);
	tda_reg("EASY_PROG_BYTE_5 = 0x%02x\n", 0xff & regs[R_EP5]);
	tda_reg("CAL_POST_DIV_BYTE = 0x%02x\n", 0xff & regs[R_CPD]);
	tda_reg("CAL_DIV_BYTE_1 = 0x%02x\n", 0xff & regs[R_CD1]);
	tda_reg("CAL_DIV_BYTE_2 = 0x%02x\n", 0xff & regs[R_CD2]);
	tda_reg("CAL_DIV_BYTE_3 = 0x%02x\n", 0xff & regs[R_CD3]);
	tda_reg("MAIN_POST_DIV_BYTE = 0x%02x\n", 0xff & regs[R_MPD]);
	tda_reg("MAIN_DIV_BYTE_1 = 0x%02x\n", 0xff & regs[R_MD1]);
	tda_reg("MAIN_DIV_BYTE_2 = 0x%02x\n", 0xff & regs[R_MD2]);
	tda_reg("MAIN_DIV_BYTE_3 = 0x%02x\n", 0xff & regs[R_MD3]);
	/* only dump extended regs if DBG_ADV is set */
	if (!(tda18271_debug & DBG_ADV))
		return;
	/* W indicates write-only registers.
	 * Register dump for write-only registers shows last value written. */
	tda_reg("EXTENDED_BYTE_1 = 0x%02x\n", 0xff & regs[R_EB1]);
	tda_reg("EXTENDED_BYTE_2 = 0x%02x\n", 0xff & regs[R_EB2]);
	tda_reg("EXTENDED_BYTE_3 = 0x%02x\n", 0xff & regs[R_EB3]);
	tda_reg("EXTENDED_BYTE_4 = 0x%02x\n", 0xff & regs[R_EB4]);
	tda_reg("EXTENDED_BYTE_5 = 0x%02x\n", 0xff & regs[R_EB5]);
	tda_reg("EXTENDED_BYTE_6 = 0x%02x\n", 0xff & regs[R_EB6]);
	tda_reg("EXTENDED_BYTE_7 = 0x%02x\n", 0xff & regs[R_EB7]);
	tda_reg("EXTENDED_BYTE_8 = 0x%02x\n", 0xff & regs[R_EB8]);
	tda_reg("EXTENDED_BYTE_9 W = 0x%02x\n", 0xff & regs[R_EB9]);
	tda_reg("EXTENDED_BYTE_10 = 0x%02x\n", 0xff & regs[R_EB10]);
	tda_reg("EXTENDED_BYTE_11 = 0x%02x\n", 0xff & regs[R_EB11]);
	tda_reg("EXTENDED_BYTE_12 = 0x%02x\n", 0xff & regs[R_EB12]);
	tda_reg("EXTENDED_BYTE_13 = 0x%02x\n", 0xff & regs[R_EB13]);
	tda_reg("EXTENDED_BYTE_14 = 0x%02x\n", 0xff & regs[R_EB14]);
	tda_reg("EXTENDED_BYTE_15 = 0x%02x\n", 0xff & regs[R_EB15]);
	tda_reg("EXTENDED_BYTE_16 W = 0x%02x\n", 0xff & regs[R_EB16]);
	tda_reg("EXTENDED_BYTE_17 W = 0x%02x\n", 0xff & regs[R_EB17]);
	tda_reg("EXTENDED_BYTE_18 = 0x%02x\n", 0xff & regs[R_EB18]);
	tda_reg("EXTENDED_BYTE_19 W = 0x%02x\n", 0xff & regs[R_EB19]);
	tda_reg("EXTENDED_BYTE_20 W = 0x%02x\n", 0xff & regs[R_EB20]);
	tda_reg("EXTENDED_BYTE_21 = 0x%02x\n", 0xff & regs[R_EB21]);
	tda_reg("EXTENDED_BYTE_22 = 0x%02x\n", 0xff & regs[R_EB22]);
	tda_reg("EXTENDED_BYTE_23 = 0x%02x\n", 0xff & regs[R_EB23]);
}
/*
 * tda18271_read_regs - read the first 16 chip registers into the shadow.
 * @fe: frontend to read from
 *
 * Performs a write of register index 0 followed by a 16-byte read,
 * wrapped in the I2C gate.  Only the 16 "main" registers are read here;
 * the extended bytes are handled by tda18271_read_extended().
 *
 * Returns 0 on success.  NOTE(review): on a short transfer the raw
 * i2c_transfer() result is returned as-is, which may be a positive
 * partial message count rather than a negative errno — callers should
 * treat any nonzero value as failure.
 */
int tda18271_read_regs(struct dvb_frontend *fe)
{
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	unsigned char buf = 0x00; /* start reading at register index 0 */
	int ret;
	struct i2c_msg msg[] = {
		{ .addr = priv->i2c_props.addr, .flags = 0,
		  .buf = &buf, .len = 1 },
		{ .addr = priv->i2c_props.addr, .flags = I2C_M_RD,
		  .buf = regs, .len = 16 }
	};
	tda18271_i2c_gate_ctrl(fe, 1);
	/* read all registers */
	ret = i2c_transfer(priv->i2c_props.adap, msg, 2);
	tda18271_i2c_gate_ctrl(fe, 0);
	if (ret != 2)
		tda_err("ERROR: i2c_transfer returned: %d\n", ret);
	if (tda18271_debug & DBG_REG)
		tda18271_dump_regs(fe, 0);
	return (ret == 2 ? 0 : ret);
}
/*
 * tda18271_read_extended - read the full register map into the shadow.
 * @fe: frontend to read from
 *
 * Reads all TDA18271_NUM_REGS bytes into a temporary buffer and copies
 * them into the shadow, skipping the write-only registers (EB9, EB16,
 * EB17, EB19, EB20) so the shadow keeps the last values written there.
 *
 * Returns 0 on success; see tda18271_read_regs() for the error-return
 * caveat (a positive partial count may be propagated).
 */
int tda18271_read_extended(struct dvb_frontend *fe)
{
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	unsigned char regdump[TDA18271_NUM_REGS];
	unsigned char buf = 0x00; /* start reading at register index 0 */
	int ret, i;
	struct i2c_msg msg[] = {
		{ .addr = priv->i2c_props.addr, .flags = 0,
		  .buf = &buf, .len = 1 },
		{ .addr = priv->i2c_props.addr, .flags = I2C_M_RD,
		  .buf = regdump, .len = TDA18271_NUM_REGS }
	};
	tda18271_i2c_gate_ctrl(fe, 1);
	/* read all registers */
	ret = i2c_transfer(priv->i2c_props.adap, msg, 2);
	tda18271_i2c_gate_ctrl(fe, 0);
	if (ret != 2)
		tda_err("ERROR: i2c_transfer returned: %d\n", ret);
	for (i = 0; i < TDA18271_NUM_REGS; i++) {
		/* don't update write-only registers */
		if ((i != R_EB9) &&
		    (i != R_EB16) &&
		    (i != R_EB17) &&
		    (i != R_EB19) &&
		    (i != R_EB20))
			regs[i] = regdump[i];
	}
	if (tda18271_debug & DBG_REG)
		tda18271_dump_regs(fe, 1);
	return (ret == 2 ? 0 : ret);
}
/*
 * tda18271_write_regs - write a range of shadow registers to the chip.
 * @fe:  frontend to write to
 * @idx: first register index to write
 * @len: number of consecutive registers to write
 *
 * Writes regs[idx..idx+len-1] over I2C, splitting the transfer into
 * chunks when the adapter was probed with a small-transfer limitation
 * (priv->small_i2c).  Each chunk is prefixed with its starting register
 * index.  The whole sequence runs with the I2C gate open.
 *
 * Returns 0 on success; a non-1 i2c_transfer() result otherwise (may be
 * a non-negative partial count — treat any nonzero return as failure).
 */
int tda18271_write_regs(struct dvb_frontend *fe, int idx, int len)
{
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	unsigned char buf[TDA18271_NUM_REGS + 1]; /* +1 for the index byte */
	struct i2c_msg msg = { .addr = priv->i2c_props.addr, .flags = 0,
			       .buf = buf };
	int i, ret = 1, max;
	/* a zero-length or out-of-range write is a driver bug */
	BUG_ON((len == 0) || (idx + len > sizeof(buf)));
	/* chunk size depends on the adapter's transfer-size limitation */
	switch (priv->small_i2c) {
	case TDA18271_03_BYTE_CHUNK_INIT:
		max = 3;
		break;
	case TDA18271_08_BYTE_CHUNK_INIT:
		max = 8;
		break;
	case TDA18271_16_BYTE_CHUNK_INIT:
		max = 16;
		break;
	case TDA18271_39_BYTE_CHUNK_INIT:
	default:
		max = 39;
	}
	tda18271_i2c_gate_ctrl(fe, 1);
	while (len) {
		if (max > len)
			max = len; /* final, possibly shorter, chunk */
		buf[0] = idx;  /* register index this chunk starts at */
		for (i = 1; i <= max; i++)
			buf[i] = regs[idx - 1 + i];
		msg.len = max + 1;
		/* write registers */
		ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
		if (ret != 1)
			break; /* idx still names the failed chunk's start */
		idx += max;
		len -= max;
	}
	tda18271_i2c_gate_ctrl(fe, 0);
	if (ret != 1)
		tda_err("ERROR: idx = 0x%x, len = %d, "
			"i2c_transfer returned: %d\n", idx, max, ret);
	return (ret == 1 ? 0 : ret);
}
/*---------------------------------------------------------------------*/
/*
 * tda18271_charge_pump_source - force or release a PLL's charge pump source.
 * @fe:    frontend to program
 * @pll:   which PLL (cal pll -> EB7, main pll -> EB4)
 * @force: low bit selects the forced state (bit 5 of the register)
 *
 * Updates bit 5 of the relevant extended byte in the shadow and writes
 * that single register to the chip.  Returns tda18271_write_regs()'s
 * result (0 on success).
 */
int tda18271_charge_pump_source(struct dvb_frontend *fe,
				enum tda18271_pll pll, int force)
{
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regmap = priv->tda18271_regs;
	int reg;

	if (pll == TDA18271_CAL_PLL)
		reg = R_EB7;
	else
		reg = R_EB4;

	/* rewrite bit 5 (charge pump source) while keeping the others */
	regmap[reg] = (regmap[reg] & ~0x20) | ((force & 1) << 5);

	return tda18271_write_regs(fe, reg, 1);
}
/*
 * tda18271_init_regs - program the power-on register defaults and run the
 * image rejection calibration sequence.
 * @fe: frontend to initialize
 *
 * Fills the whole register shadow with chip-revision-specific defaults
 * (TDA18271HDC1 vs HDC2), writes it to the chip, then steps the AGC
 * gains and performs the low/mid/high band image rejection calibration.
 * The msleep() delays and the write ordering follow the vendor init
 * sequence and must not be reordered.
 *
 * Always returns 0; intermediate write errors are not checked here.
 */
int tda18271_init_regs(struct dvb_frontend *fe)
{
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	tda_dbg("initializing registers for device @ %d-%04x\n",
		i2c_adapter_id(priv->i2c_props.adap),
		priv->i2c_props.addr);
	/* initialize registers */
	switch (priv->id) {
	case TDA18271HDC1:
		regs[R_ID] = 0x83;
		break;
	case TDA18271HDC2:
		regs[R_ID] = 0x84;
		break;
	};
	regs[R_TM] = 0x08;
	regs[R_PL] = 0x80;
	regs[R_EP1] = 0xc6;
	regs[R_EP2] = 0xdf;
	regs[R_EP3] = 0x16;
	regs[R_EP4] = 0x60;
	regs[R_EP5] = 0x80;
	regs[R_CPD] = 0x80;
	regs[R_CD1] = 0x00;
	regs[R_CD2] = 0x00;
	regs[R_CD3] = 0x00;
	regs[R_MPD] = 0x00;
	regs[R_MD1] = 0x00;
	regs[R_MD2] = 0x00;
	regs[R_MD3] = 0x00;
	/* extended bytes: several defaults differ per chip revision */
	switch (priv->id) {
	case TDA18271HDC1:
		regs[R_EB1] = 0xff;
		break;
	case TDA18271HDC2:
		regs[R_EB1] = 0xfc;
		break;
	};
	regs[R_EB2] = 0x01;
	regs[R_EB3] = 0x84;
	regs[R_EB4] = 0x41;
	regs[R_EB5] = 0x01;
	regs[R_EB6] = 0x84;
	regs[R_EB7] = 0x40;
	regs[R_EB8] = 0x07;
	regs[R_EB9] = 0x00;
	regs[R_EB10] = 0x00;
	regs[R_EB11] = 0x96;
	switch (priv->id) {
	case TDA18271HDC1:
		regs[R_EB12] = 0x0f;
		break;
	case TDA18271HDC2:
		regs[R_EB12] = 0x33;
		break;
	};
	regs[R_EB13] = 0xc1;
	regs[R_EB14] = 0x00;
	regs[R_EB15] = 0x8f;
	regs[R_EB16] = 0x00;
	regs[R_EB17] = 0x00;
	switch (priv->id) {
	case TDA18271HDC1:
		regs[R_EB18] = 0x00;
		break;
	case TDA18271HDC2:
		regs[R_EB18] = 0x8c;
		break;
	};
	regs[R_EB19] = 0x00;
	regs[R_EB20] = 0x20;
	switch (priv->id) {
	case TDA18271HDC1:
		regs[R_EB21] = 0x33;
		break;
	case TDA18271HDC2:
		regs[R_EB21] = 0xb3;
		break;
	};
	regs[R_EB22] = 0x48;
	regs[R_EB23] = 0xb0;
	/* push the whole shadow to the chip in one pass */
	tda18271_write_regs(fe, 0x00, TDA18271_NUM_REGS);
	/* setup agc1 gain (stepped through intermediate values) */
	regs[R_EB17] = 0x00;
	tda18271_write_regs(fe, R_EB17, 1);
	regs[R_EB17] = 0x03;
	tda18271_write_regs(fe, R_EB17, 1);
	regs[R_EB17] = 0x43;
	tda18271_write_regs(fe, R_EB17, 1);
	regs[R_EB17] = 0x4c;
	tda18271_write_regs(fe, R_EB17, 1);
	/* setup agc2 gain (HDC1 only) */
	if ((priv->id) == TDA18271HDC1) {
		regs[R_EB20] = 0xa0;
		tda18271_write_regs(fe, R_EB20, 1);
		regs[R_EB20] = 0xa7;
		tda18271_write_regs(fe, R_EB20, 1);
		regs[R_EB20] = 0xe7;
		tda18271_write_regs(fe, R_EB20, 1);
		regs[R_EB20] = 0xec;
		tda18271_write_regs(fe, R_EB20, 1);
	}
	/* image rejection calibration */
	/* low-band */
	regs[R_EP3] = 0x1f;
	regs[R_EP4] = 0x66;
	regs[R_EP5] = 0x81;
	regs[R_CPD] = 0xcc;
	regs[R_CD1] = 0x6c;
	regs[R_CD2] = 0x00;
	regs[R_CD3] = 0x00;
	regs[R_MPD] = 0xcd;
	regs[R_MD1] = 0x77;
	regs[R_MD2] = 0x08;
	regs[R_MD3] = 0x00;
	tda18271_write_regs(fe, R_EP3, 11);
	if ((priv->id) == TDA18271HDC2) {
		/* main pll cp source on */
		tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 1);
		msleep(1);
		/* main pll cp source off */
		tda18271_charge_pump_source(fe, TDA18271_MAIN_PLL, 0);
	}
	msleep(5); /* pll locking */
	/* launch detector */
	tda18271_write_regs(fe, R_EP1, 1);
	msleep(5); /* wanted low measurement */
	regs[R_EP5] = 0x85;
	regs[R_CPD] = 0xcb;
	regs[R_CD1] = 0x66;
	regs[R_CD2] = 0x70;
	tda18271_write_regs(fe, R_EP3, 7);
	msleep(5); /* pll locking */
	/* launch optimization algorithm */
	tda18271_write_regs(fe, R_EP2, 1);
	msleep(30); /* image low optimization completion */
	/* mid-band */
	regs[R_EP5] = 0x82;
	regs[R_CPD] = 0xa8;
	regs[R_CD2] = 0x00;
	regs[R_MPD] = 0xa9;
	regs[R_MD1] = 0x73;
	regs[R_MD2] = 0x1a;
	tda18271_write_regs(fe, R_EP3, 11);
	msleep(5); /* pll locking */
	/* launch detector */
	tda18271_write_regs(fe, R_EP1, 1);
	msleep(5); /* wanted mid measurement */
	regs[R_EP5] = 0x86;
	regs[R_CPD] = 0xa8;
	regs[R_CD1] = 0x66;
	regs[R_CD2] = 0xa0;
	tda18271_write_regs(fe, R_EP3, 7);
	msleep(5); /* pll locking */
	/* launch optimization algorithm */
	tda18271_write_regs(fe, R_EP2, 1);
	msleep(30); /* image mid optimization completion */
	/* high-band */
	regs[R_EP5] = 0x83;
	regs[R_CPD] = 0x98;
	regs[R_CD1] = 0x65;
	regs[R_CD2] = 0x00;
	regs[R_MPD] = 0x99;
	regs[R_MD1] = 0x71;
	regs[R_MD2] = 0xcd;
	tda18271_write_regs(fe, R_EP3, 11);
	msleep(5); /* pll locking */
	/* launch detector */
	tda18271_write_regs(fe, R_EP1, 1);
	msleep(5); /* wanted high measurement */
	regs[R_EP5] = 0x87;
	regs[R_CD1] = 0x65;
	regs[R_CD2] = 0x50;
	tda18271_write_regs(fe, R_EP3, 7);
	msleep(5); /* pll locking */
	/* launch optimization algorithm */
	tda18271_write_regs(fe, R_EP2, 1);
	msleep(30); /* image high optimization completion */
	/* return to normal mode */
	regs[R_EP4] = 0x64;
	tda18271_write_regs(fe, R_EP4, 1);
	/* synchronize */
	tda18271_write_regs(fe, R_EP1, 1);
	return 0;
}
/*---------------------------------------------------------------------*/
/*
* Standby modes, EP3 [7:5]
*
* | SM || SM_LT || SM_XT || mode description
* |=====\\=======\\=======\\===================================
* | 0 || 0 || 0 || normal mode
* |-----||-------||-------||-----------------------------------
* | || || || standby mode w/ slave tuner output
* | 1 || 0 || 0 || & loop thru & xtal oscillator on
* |-----||-------||-------||-----------------------------------
* | 1 || 1 || 0 || standby mode w/ xtal oscillator on
* |-----||-------||-------||-----------------------------------
* | 1 || 1 || 1 || power off
*
*/
/*
 * tda18271_set_standby_mode - program the SM / SM_LT / SM_XT bits of EP3.
 * @fe:    frontend to program
 * @sm:    standby mode (bit 7)
 * @sm_lt: loop-through off (bit 6)
 * @sm_xt: xtal oscillator off (bit 5)
 *
 * Rewrites EP3[7:5] in the shadow and writes the register to the chip
 * (see the mode table above).  Returns tda18271_write_regs()'s result.
 */
int tda18271_set_standby_mode(struct dvb_frontend *fe,
			      int sm, int sm_lt, int sm_xt)
{
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	unsigned char mode_bits = 0;

	if (tda18271_debug & DBG_ADV)
		tda_dbg("sm = %d, sm_lt = %d, sm_xt = %d\n", sm, sm_lt, sm_xt);

	if (sm)
		mode_bits |= 1 << 7;
	if (sm_lt)
		mode_bits |= 1 << 6;
	if (sm_xt)
		mode_bits |= 1 << 5;

	/* replace EP3[7:5] with the requested standby bits */
	regs[R_EP3] = (regs[R_EP3] & ~0xe0) | mode_bits;

	return tda18271_write_regs(fe, R_EP3, 1);
}
/*---------------------------------------------------------------------*/
/*
 * tda18271_calc_main_pll - compute the main PLL post-divider and divider
 * shadow bytes for @freq (Hz).
 * @fe:   frontend whose shadow to update
 * @freq: target frequency in Hz
 *
 * Looks up the post-divider (pd) and divider factor (d) for @freq, then
 * computes div = (d * (freq / 1000) << 7) / 125 and splits it across
 * MD1..MD3 (MD1 keeps bit 7 clear).  MPD is masked to 7 bits.
 *
 * Returns 0 on success or the lookup's negative error code.  Shadow
 * bytes are only touched on a successful lookup; nothing is written to
 * the chip here.
 */
int tda18271_calc_main_pll(struct dvb_frontend *fe, u32 freq)
{
	/* sets main post divider & divider bytes, but does not write them */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 d, pd;
	u32 div;
	int ret = tda18271_lookup_pll_map(fe, MAIN_PLL, &freq, &pd, &d);
	if (tda_fail(ret))
		goto fail;
	regs[R_MPD] = (0x7f & pd);
	div = ((d * (freq / 1000)) << 7) / 125;
	regs[R_MD1] = 0x7f & (div >> 16);
	regs[R_MD2] = 0xff & (div >> 8);
	regs[R_MD3] = 0xff & div;
fail:
	return ret;
}
/*
 * tda18271_calc_cal_pll - compute the calibration PLL post-divider and
 * divider shadow bytes for @freq (Hz).
 * @fe:   frontend whose shadow to update
 * @freq: target frequency in Hz
 *
 * Same arithmetic as tda18271_calc_main_pll(), but targets CPD/CD1..CD3.
 * Note that unlike MPD, CPD takes the looked-up post-divider unmasked.
 *
 * Returns 0 on success or the lookup's negative error code.  Nothing is
 * written to the chip here.
 */
int tda18271_calc_cal_pll(struct dvb_frontend *fe, u32 freq)
{
	/* sets cal post divider & divider bytes, but does not write them */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 d, pd;
	u32 div;
	int ret = tda18271_lookup_pll_map(fe, CAL_PLL, &freq, &pd, &d);
	if (tda_fail(ret))
		goto fail;
	regs[R_CPD] = pd;
	div = ((d * (freq / 1000)) << 7) / 125;
	regs[R_CD1] = 0x7f & (div >> 16);
	regs[R_CD2] = 0xff & (div >> 8);
	regs[R_CD3] = 0xff & div;
fail:
	return ret;
}
/*---------------------------------------------------------------------*/
/*
 * tda18271_calc_bp_filter - look up and stage the band-pass filter bits.
 * @fe:   frontend whose shadow to update
 * @freq: frequency used for the map lookup (may be adjusted by it)
 *
 * Updates EP1[2:0] in the register shadow on a successful lookup; does
 * not write to the chip.  Returns 0 on success or the lookup's negative
 * error code.
 */
int tda18271_calc_bp_filter(struct dvb_frontend *fe, u32 *freq)
{
	/* sets bp filter bits, but does not write them */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 bp;
	int ret;

	ret = tda18271_lookup_map(fe, BP_FILTER, freq, &bp);
	if (tda_fail(ret))
		return ret;

	/* replace the low three bits of EP1 with the looked-up value */
	regs[R_EP1] = (regs[R_EP1] & ~0x07) | (bp & 0x07);

	return ret;
}
/*
 * tda18271_calc_km - look up and stage the RF calibration K & M bits.
 * @fe:   frontend whose shadow to update
 * @freq: frequency used for the map lookup
 *
 * Updates EB13[6:2] in the shadow on a successful lookup; does not
 * write to the chip.  Returns 0 on success or the lookup's negative
 * error code.
 */
int tda18271_calc_km(struct dvb_frontend *fe, u32 *freq)
{
	/* sets K & M bits, but does not write them */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 val;
	int ret = tda18271_lookup_map(fe, RF_CAL_KMCO, freq, &val);
	if (tda_fail(ret))
		goto fail;
	regs[R_EB13] &= ~0x7c; /* clear k & m bits */
	regs[R_EB13] |= (0x7c & val);
fail:
	return ret;
}
/*
 * tda18271_calc_rf_band - look up and stage the RF band bits.
 * @fe:   frontend whose shadow to update
 * @freq: frequency used for the map lookup
 *
 * Updates EP2[7:5] in the shadow on a successful lookup; does not write
 * to the chip.  Returns 0 on success or the lookup's negative error code.
 */
int tda18271_calc_rf_band(struct dvb_frontend *fe, u32 *freq)
{
	/* sets rf band bits, but does not write them */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 val;
	int ret = tda18271_lookup_map(fe, RF_BAND, freq, &val);
	if (tda_fail(ret))
		goto fail;
	regs[R_EP2] &= ~0xe0; /* clear rf band bits */
	regs[R_EP2] |= (0xe0 & (val << 5));
fail:
	return ret;
}
/*
 * tda18271_calc_gain_taper - look up and stage the gain taper bits.
 * @fe:   frontend whose shadow to update
 * @freq: frequency used for the map lookup
 *
 * Updates EP2[4:0] in the shadow on a successful lookup; does not write
 * to the chip.  Returns 0 on success or the lookup's negative error code.
 */
int tda18271_calc_gain_taper(struct dvb_frontend *fe, u32 *freq)
{
	/* sets gain taper bits, but does not write them */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 val;
	int ret = tda18271_lookup_map(fe, GAIN_TAPER, freq, &val);
	if (tda_fail(ret))
		goto fail;
	regs[R_EP2] &= ~0x1f; /* clear gain taper bits */
	regs[R_EP2] |= (0x1f & val);
fail:
	return ret;
}
/*
 * tda18271_calc_ir_measure - look up and stage the IR measure bits.
 * @fe:   frontend whose shadow to update
 * @freq: frequency used for the map lookup
 *
 * Updates EP5[2:0] in the shadow on a successful lookup; does not write
 * to the chip.  Returns 0 on success or the lookup's negative error code.
 */
int tda18271_calc_ir_measure(struct dvb_frontend *fe, u32 *freq)
{
	/* sets IR Meas bits, but does not write them */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 val;
	int ret = tda18271_lookup_map(fe, IR_MEASURE, freq, &val);
	if (tda_fail(ret))
		goto fail;
	regs[R_EP5] &= ~0x07; /* clear IR measure bits */
	regs[R_EP5] |= (0x07 & val);
fail:
	return ret;
}
/*
 * tda18271_calc_rf_cal - look up and stage the RF calibration byte.
 * @fe:   frontend whose shadow to update
 * @freq: frequency used for the map lookup
 *
 * Stores the looked-up RFC_Cprog value in EB14 on success; does not
 * write to the chip.  A failed lookup is expected (and silent) on HDC1
 * for frequencies above 61.1 MHz — see the comment below.  Returns the
 * lookup's result (0 on success, negative on lookup failure).
 */
int tda18271_calc_rf_cal(struct dvb_frontend *fe, u32 *freq)
{
	/* sets rf cal byte (RFC_Cprog), but does not write it */
	struct tda18271_priv *priv = fe->tuner_priv;
	unsigned char *regs = priv->tda18271_regs;
	u8 val;
	int ret = tda18271_lookup_map(fe, RF_CAL, freq, &val);
	/* The TDA18271HD/C1 rf_cal map lookup is expected to go out of range
	 * for frequencies above 61.1 MHz. In these cases, the internal RF
	 * tracking filters calibration mechanism is used.
	 *
	 * There is no need to warn the user about this.
	 */
	if (ret < 0)
		goto fail;
	regs[R_EB14] = val;
fail:
	return ret;
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
wwenigma/cocktail-kernel-msm7x30 | arch/arm/mach-shmobile/board-g3evm.c | 2930 | 9346 | /*
* G3EVM board support
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2008 Yoshihiro Shimoda
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
#include <linux/usb/r8a66597.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <mach/sh7367.h>
#include <mach/common.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
/*
* IrDA
*
* S67: 5bit : ON power
* : 6bit : ON remote control
* OFF IrDA
*/
/* NOR flash: physmap-flash device covering a 128 MiB window at 0x0,
 * partitioned into loader / bootenv / read-only kernel / kernel / data.
 */
static struct mtd_partition nor_flash_partitions[] = {
	{
		.name = "loader",
		.offset = 0x00000000,
		.size = 512 * 1024,
	},
	{
		.name = "bootenv",
		.offset = MTDPART_OFS_APPEND,
		.size = 512 * 1024,
	},
	{
		.name = "kernel_ro",
		.offset = MTDPART_OFS_APPEND,
		.size = 8 * 1024 * 1024,
		.mask_flags = MTD_WRITEABLE, /* force read-only */
	},
	{
		.name = "kernel",
		.offset = MTDPART_OFS_APPEND,
		.size = 8 * 1024 * 1024,
	},
	{
		.name = "data",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL, /* remainder of the chip */
	},
};
static struct physmap_flash_data nor_flash_data = {
	.width = 2, /* 16-bit bus */
	.parts = nor_flash_partitions,
	.nr_parts = ARRAY_SIZE(nor_flash_partitions),
};
static struct resource nor_flash_resources[] = {
	[0] = {
		.start = 0x00000000,
		.end = 0x08000000 - 1,
		.flags = IORESOURCE_MEM,
	}
};
static struct platform_device nor_flash_device = {
	.name = "physmap-flash",
	.dev = {
		.platform_data = &nor_flash_data,
	},
	.num_resources = ARRAY_SIZE(nor_flash_resources),
	.resource = nor_flash_resources,
};
/* USBHS: on-chip r8a66597 host controller */
/*
 * usb_host_port_power - board callback to power a host port.
 * Only power-on is implemented; power-off requests are ignored.
 */
static void usb_host_port_power(int port, int power)
{
	if (!power) /* only power-on supported for now */
		return;
	/* set VBOUT/PWEN and EXTLP0 in DVSTCTR */
	__raw_writew(__raw_readw(0xe6890008) | 0x600, 0xe6890008);
}
static struct r8a66597_platdata usb_host_data = {
	.on_chip = 1,
	.port_power = usb_host_port_power,
};
static struct resource usb_host_resources[] = {
	[0] = {
		.name = "USBHS",
		.start = 0xe6890000,
		.end = 0xe68900e5,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = evt2irq(0xa20), /* USBHS_USHI0 */
		.flags = IORESOURCE_IRQ,
	},
};
static struct platform_device usb_host_device = {
	.name = "r8a66597_hcd",
	.id = 0,
	.dev = {
		.platform_data = &usb_host_data,
		.dma_mask = NULL, /* not able to do DMA */
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources = ARRAY_SIZE(usb_host_resources),
	.resource = usb_host_resources,
};
/* KEYSC: key-scan matrix controller with its keycode map */
static struct sh_keysc_info keysc_info = {
	.mode = SH_KEYSC_MODE_5,
	.scan_timing = 3,
	.delay = 100,
	.keycodes = {
		KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G,
		KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N,
		KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
		KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP,
		KEY_WAKEUP, KEY_COFFEE, KEY_0, KEY_1, KEY_2, KEY_3, KEY_4,
		KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER,
	},
};
static struct resource keysc_resources[] = {
	[0] = {
		.name = "KEYSC",
		.start = 0xe61b0000,
		.end = 0xe61b000f,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = evt2irq(0xbe0), /* KEYSC_KEY */
		.flags = IORESOURCE_IRQ,
	},
};
static struct platform_device keysc_device = {
	.name = "sh_keysc",
	.num_resources = ARRAY_SIZE(keysc_resources),
	.resource = keysc_resources,
	.dev = {
		.platform_data = &keysc_info,
	},
};
/* NAND flash via the on-chip FLCTL controller (16-bit bus) */
static struct mtd_partition nand_partition_info[] = {
	{
		.name = "system",
		.offset = 0,
		.size = 64 * 1024 * 1024,
	},
	{
		.name = "userdata",
		.offset = MTDPART_OFS_APPEND,
		.size = 128 * 1024 * 1024,
	},
	{
		.name = "cache",
		.offset = MTDPART_OFS_APPEND,
		.size = 64 * 1024 * 1024,
	},
};
static struct resource nand_flash_resources[] = {
	[0] = {
		.start = 0xe6a30000,
		.end = 0xe6a3009b,
		.flags = IORESOURCE_MEM,
	}
};
static struct sh_flctl_platform_data nand_flash_data = {
	.parts = nand_partition_info,
	.nr_parts = ARRAY_SIZE(nand_partition_info),
	.flcmncr_val = QTSEL_E | FCKSEL_E | TYPESEL_SET | NANWF_E
		| SHBUSSEL | SEL_16BIT,
};
static struct platform_device nand_flash_device = {
	.name = "sh_flctl",
	.resource = nand_flash_resources,
	.num_resources = ARRAY_SIZE(nand_flash_resources),
	.dev = {
		.platform_data = &nand_flash_data,
	},
};
/* IrDA controller, plus the list of all board devices registered at init */
static struct resource irda_resources[] = {
	[0] = {
		.start = 0xE6D00000,
		.end = 0xE6D01FD4 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = evt2irq(0x480), /* IRDA */
		.flags = IORESOURCE_IRQ,
	},
};
static struct platform_device irda_device = {
	.name = "sh_irda",
	.id = -1,
	.resource = irda_resources,
	.num_resources = ARRAY_SIZE(irda_resources),
};
/* all platform devices registered by g3evm_init() */
static struct platform_device *g3evm_devices[] __initdata = {
	&nor_flash_device,
	&usb_host_device,
	&keysc_device,
	&nand_flash_device,
	&irda_device,
};
static struct map_desc g3evm_io_desc[] __initdata = {
	/* create a 1:1 entity map for 0xe6xxxxxx
	 * used by CPGA, INTC and PFC.
	 */
	{
		.virtual = 0xe6000000,
		.pfn = __phys_to_pfn(0xe6000000),
		.length = 256 << 20, /* 256 MiB */
		.type = MT_DEVICE_NONSHARED
	},
};
/*
 * g3evm_map_io - set up the static I/O mapping and early devices.
 * Runs before the normal device-init phase.
 */
static void __init g3evm_map_io(void)
{
	iotable_init(g3evm_io_desc, ARRAY_SIZE(g3evm_io_desc));
	/* setup early devices and console here as well */
	sh7367_add_early_devices();
	shmobile_setup_console();
}
/*
 * g3evm_init - board init: pin mux, LEDs, USB PHY setup, and device
 * registration.
 *
 * The raw __raw_writew() pokes configure the USB PHY registers directly;
 * their ordering follows the board bring-up sequence and should not be
 * reordered.  gpio_request() return values are intentionally unchecked
 * (board file convention).
 */
static void __init g3evm_init(void)
{
	sh7367_pinmux_init();
	/* Lit DS4 LED */
	gpio_request(GPIO_PORT22, NULL);
	gpio_direction_output(GPIO_PORT22, 1);
	gpio_export(GPIO_PORT22, 0);
	/* Lit DS8 LED */
	gpio_request(GPIO_PORT23, NULL);
	gpio_direction_output(GPIO_PORT23, 1);
	gpio_export(GPIO_PORT23, 0);
	/* Lit DS3 LED */
	gpio_request(GPIO_PORT24, NULL);
	gpio_direction_output(GPIO_PORT24, 1);
	gpio_export(GPIO_PORT24, 0);
	/* SCIFA1 */
	gpio_request(GPIO_FN_SCIFA1_TXD, NULL);
	gpio_request(GPIO_FN_SCIFA1_RXD, NULL);
	gpio_request(GPIO_FN_SCIFA1_CTS, NULL);
	gpio_request(GPIO_FN_SCIFA1_RTS, NULL);
	/* USBHS */
	gpio_request(GPIO_FN_VBUS0, NULL);
	gpio_request(GPIO_FN_PWEN, NULL);
	gpio_request(GPIO_FN_OVCN, NULL);
	gpio_request(GPIO_FN_OVCN2, NULL);
	gpio_request(GPIO_FN_EXTLP, NULL);
	gpio_request(GPIO_FN_IDIN, NULL);
	/* setup USB phy */
	__raw_writew(0x0300, 0xe605810a); /* USBCR1 */
	__raw_writew(0x00e0, 0xe60581c0); /* CPFCH */
	__raw_writew(0x6010, 0xe60581c6); /* CGPOSR */
	__raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */
	/* KEYSC @ CN7 */
	gpio_request(GPIO_FN_PORT42_KEYOUT0, NULL);
	gpio_request(GPIO_FN_PORT43_KEYOUT1, NULL);
	gpio_request(GPIO_FN_PORT44_KEYOUT2, NULL);
	gpio_request(GPIO_FN_PORT45_KEYOUT3, NULL);
	gpio_request(GPIO_FN_PORT46_KEYOUT4, NULL);
	gpio_request(GPIO_FN_PORT47_KEYOUT5, NULL);
	gpio_request(GPIO_FN_PORT48_KEYIN0_PU, NULL);
	gpio_request(GPIO_FN_PORT49_KEYIN1_PU, NULL);
	gpio_request(GPIO_FN_PORT50_KEYIN2_PU, NULL);
	gpio_request(GPIO_FN_PORT55_KEYIN3_PU, NULL);
	gpio_request(GPIO_FN_PORT56_KEYIN4_PU, NULL);
	gpio_request(GPIO_FN_PORT57_KEYIN5_PU, NULL);
	gpio_request(GPIO_FN_PORT58_KEYIN6_PU, NULL);
	/* FLCTL */
	gpio_request(GPIO_FN_FCE0, NULL);
	gpio_request(GPIO_FN_D0_ED0_NAF0, NULL);
	gpio_request(GPIO_FN_D1_ED1_NAF1, NULL);
	gpio_request(GPIO_FN_D2_ED2_NAF2, NULL);
	gpio_request(GPIO_FN_D3_ED3_NAF3, NULL);
	gpio_request(GPIO_FN_D4_ED4_NAF4, NULL);
	gpio_request(GPIO_FN_D5_ED5_NAF5, NULL);
	gpio_request(GPIO_FN_D6_ED6_NAF6, NULL);
	gpio_request(GPIO_FN_D7_ED7_NAF7, NULL);
	gpio_request(GPIO_FN_D8_ED8_NAF8, NULL);
	gpio_request(GPIO_FN_D9_ED9_NAF9, NULL);
	gpio_request(GPIO_FN_D10_ED10_NAF10, NULL);
	gpio_request(GPIO_FN_D11_ED11_NAF11, NULL);
	gpio_request(GPIO_FN_D12_ED12_NAF12, NULL);
	gpio_request(GPIO_FN_D13_ED13_NAF13, NULL);
	gpio_request(GPIO_FN_D14_ED14_NAF14, NULL);
	gpio_request(GPIO_FN_D15_ED15_NAF15, NULL);
	gpio_request(GPIO_FN_WE0_XWR0_FWE, NULL);
	gpio_request(GPIO_FN_FRB, NULL);
	/* FOE, FCDE, FSC on dedicated pins */
	__raw_writel(__raw_readl(0xe6158048) & ~(1 << 15), 0xe6158048);
	/* IrDA */
	gpio_request(GPIO_FN_IRDA_OUT, NULL);
	gpio_request(GPIO_FN_IRDA_IN, NULL);
	gpio_request(GPIO_FN_IRDA_FIRSEL, NULL);
	sh7367_add_standard_devices();
	platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices));
}
/*
 * g3evm_timer_init - initialize clocks before the generic shmobile timer.
 */
static void __init g3evm_timer_init(void)
{
	sh7367_clock_init();
	shmobile_timer.init();
}
static struct sys_timer g3evm_timer = {
	.init = g3evm_timer_init,
};
/* machine descriptor tying the above hooks together */
MACHINE_START(G3EVM, "g3evm")
	.map_io = g3evm_map_io,
	.init_irq = sh7367_init_irq,
	.handle_irq = shmobile_handle_irq_intc,
	.init_machine = g3evm_init,
	.timer = &g3evm_timer,
MACHINE_END
| gpl-2.0 |
wwwhana/android_kernel_sony_wukong | drivers/usb/storage/cypress_atacb.c | 3442 | 8242 | /*
* Support for emulating SAT (ata pass through) on devices based
* on the Cypress USB/ATA bridge supporting ATACB.
*
* Copyright (c) 2008 Matthieu Castet (castet.matthieu@free.fr)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <linux/ata.h>
#include "usb.h"
#include "protocol.h"
#include "scsiglue.h"
#include "debug.h"
MODULE_DESCRIPTION("SAT support for Cypress USB/ATA bridges with ATACB");
MODULE_AUTHOR("Matthieu Castet <castet.matthieu@free.fr>");
MODULE_LICENSE("GPL");
/*
 * The table of devices
 *
 * unusual_cypress.h is included twice with two different UNUSUAL_DEV
 * expansions: first to build the USB id match table, then (below) to
 * build the parallel per-device flags/strings table.  The two tables
 * therefore stay index-aligned — cypress_probe() relies on that.
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
static struct usb_device_id cypress_usb_ids[] = {
#	include "unusual_cypress.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, cypress_usb_ids);
#undef UNUSUAL_DEV
/*
 * The flags table
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name,	\
	.productName = product_name,	\
	.useProtocol = use_protocol,	\
	.useTransport = use_transport,	\
	.initFunction = init_function,	\
}
static struct us_unusual_dev cypress_unusual_dev_list[] = {
#	include "unusual_cypress.h"
	{ }		/* Terminating entry */
};
#undef UNUSUAL_DEV
/*
 * ATACB is a protocol used on cypress usb<->ata bridge to
 * send raw ATA command over mass storage
 * There is a ATACB2 protocol that support LBA48 on newer chip.
 * More info that be found on cy7c68310_8.pdf and cy7c68300c_8.pdf
 * datasheet from cypress.com.
 */
/*
 * cypress_atacb_passthrough - translate SCSI ATA_12/ATA_16 pass-through
 * CDBs into the Cypress vendor-specific ATACB command, run them, and
 * synthesize the SAT "ATA return descriptor" sense data when requested.
 *
 * Non-ATA commands are forwarded untouched.  The original CDB is saved
 * on entry and restored on every exit path, since srb->cmnd is rewritten
 * in place.
 */
static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
{
	unsigned char save_cmnd[MAX_COMMAND_SIZE];
	/* anything that is not an ATA pass-through CDB goes straight down */
	if (likely(srb->cmnd[0] != ATA_16 && srb->cmnd[0] != ATA_12)) {
		usb_stor_transparent_scsi_command(srb, us);
		return;
	}
	memcpy(save_cmnd, srb->cmnd, sizeof(save_cmnd));
	memset(srb->cmnd, 0, MAX_COMMAND_SIZE);
	/* check if we support the command */
	if (save_cmnd[1] >> 5) /* MULTIPLE_COUNT */
		goto invalid_fld;
	/* check protocol */
	switch((save_cmnd[1] >> 1) & 0xf) {
	case 3: /*no DATA */
	case 4: /* PIO in */
	case 5: /* PIO out */
		break;
	default:
		goto invalid_fld;
	}
	/* first build the ATACB command */
	srb->cmd_len = 16;
	srb->cmnd[0] = 0x24; /* bVSCBSignature : vendor-specific command
			this value can change, but most(all ?) manufacturers
			keep the cypress default : 0x24 */
	srb->cmnd[1] = 0x24; /* bVSCBSubCommand : 0x24 for ATACB */
	srb->cmnd[3] = 0xff - 1; /* features, sector count, lba low, lba med
			lba high, device, command are valid */
	srb->cmnd[4] = 1; /* TransferBlockCount : 512 */
	if (save_cmnd[0] == ATA_16) {
		srb->cmnd[ 6] = save_cmnd[ 4]; /* features */
		srb->cmnd[ 7] = save_cmnd[ 6]; /* sector count */
		srb->cmnd[ 8] = save_cmnd[ 8]; /* lba low */
		srb->cmnd[ 9] = save_cmnd[10]; /* lba med */
		srb->cmnd[10] = save_cmnd[12]; /* lba high */
		srb->cmnd[11] = save_cmnd[13]; /* device */
		srb->cmnd[12] = save_cmnd[14]; /* command */
		if (save_cmnd[1] & 0x01) {/* extended bit set for LBA48 */
			/* this could be supported by atacb2 */
			if (save_cmnd[3] || save_cmnd[5] || save_cmnd[7] || save_cmnd[9]
					|| save_cmnd[11])
				goto invalid_fld;
		}
	}
	else { /* ATA12 */
		srb->cmnd[ 6] = save_cmnd[3]; /* features */
		srb->cmnd[ 7] = save_cmnd[4]; /* sector count */
		srb->cmnd[ 8] = save_cmnd[5]; /* lba low */
		srb->cmnd[ 9] = save_cmnd[6]; /* lba med */
		srb->cmnd[10] = save_cmnd[7]; /* lba high */
		srb->cmnd[11] = save_cmnd[8]; /* device */
		srb->cmnd[12] = save_cmnd[9]; /* command */
	}
	/* Filter SET_FEATURES - XFER MODE command */
	if ((srb->cmnd[12] == ATA_CMD_SET_FEATURES)
			&& (srb->cmnd[6] == SETFEATURES_XFER))
		goto invalid_fld;
	if (srb->cmnd[12] == ATA_CMD_ID_ATA || srb->cmnd[12] == ATA_CMD_ID_ATAPI)
		srb->cmnd[2] |= (1<<7); /* set IdentifyPacketDevice for these cmds */
	usb_stor_transparent_scsi_command(srb, us);
	/* if the device doesn't support ATACB
	 */
	if (srb->result == SAM_STAT_CHECK_CONDITION &&
			memcmp(srb->sense_buffer, usb_stor_sense_invalidCDB,
				sizeof(usb_stor_sense_invalidCDB)) == 0) {
		US_DEBUGP("cypress atacb not supported ???\n");
		goto end;
	}
	/* if ck_cond flags is set, and there wasn't critical error,
	 * build the special sense
	 */
	if ((srb->result != (DID_ERROR << 16) &&
			srb->result != (DID_ABORT << 16)) &&
			save_cmnd[2] & 0x20) {
		struct scsi_eh_save ses;
		unsigned char regs[8];
		unsigned char *sb = srb->sense_buffer;
		unsigned char *desc = sb + 8;
		int tmp_result;
		/* build the command for
		 * reading the ATA registers */
		scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sizeof(regs));
		/* we use the same command as before, but we set
		 * the read taskfile bit, for not executing atacb command,
		 * but reading register selected in srb->cmnd[4]
		 */
		srb->cmd_len = 16;
		srb->cmnd = ses.cmnd;
		srb->cmnd[2] = 1;
		usb_stor_transparent_scsi_command(srb, us);
		memcpy(regs, srb->sense_buffer, sizeof(regs));
		tmp_result = srb->result;
		scsi_eh_restore_cmnd(srb, &ses);
		/* we fail to get registers, report invalid command */
		if (tmp_result != SAM_STAT_GOOD)
			goto invalid_fld;
		/* build the sense */
		memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
		/* set sk, asc for a good command */
		sb[1] = RECOVERED_ERROR;
		sb[2] = 0; /* ATA PASS THROUGH INFORMATION AVAILABLE */
		sb[3] = 0x1D;
		/* XXX we should generate sk, asc, ascq from status and error
		 * regs
		 * (see 11.1 Error translation ATA device error to SCSI error
		 * map, and ata_to_sense_error from libata.)
		 */
		/* Sense data is current and format is descriptor. */
		sb[0] = 0x72;
		desc[0] = 0x09; /* ATA_RETURN_DESCRIPTOR */
		/* set length of additional sense data */
		sb[7] = 14;
		desc[1] = 12;
		/* Copy registers into sense buffer. */
		desc[ 2] = 0x00;
		desc[ 3] = regs[1];  /* features */
		desc[ 5] = regs[2];  /* sector count */
		desc[ 7] = regs[3];  /* lba low */
		desc[ 9] = regs[4];  /* lba med */
		desc[11] = regs[5];  /* lba high */
		desc[12] = regs[6];  /* device */
		desc[13] = regs[7];  /* command */
		srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
	}
	goto end;
invalid_fld:
	srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
	memcpy(srb->sense_buffer,
			usb_stor_sense_invalidCDB,
			sizeof(usb_stor_sense_invalidCDB));
end:
	memcpy(srb->cmnd, save_cmnd, sizeof(save_cmnd));
	if (srb->cmnd[0] == ATA_12)
		srb->cmd_len = 12;
}
/*
 * cypress_probe - bind a matched device and install the ATACB protocol
 * handler.
 *
 * (id - cypress_usb_ids) gives the index of the matched entry, which is
 * used to pick the corresponding row of the parallel
 * cypress_unusual_dev_list table (both are generated from
 * unusual_cypress.h and stay index-aligned).
 */
static int cypress_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int result;
	result = usb_stor_probe1(&us, intf, id,
			(id - cypress_usb_ids) + cypress_unusual_dev_list);
	if (result)
		return result;
	us->protocol_name = "Transparent SCSI with Cypress ATACB";
	us->proto_handler = cypress_atacb_passthrough;
	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue: generic usb-storage callbacks plus our ATACB probe hook. */
static struct usb_driver cypress_driver = {
	.name = "ums-cypress",
	.probe = cypress_probe,
	.disconnect = usb_stor_disconnect,
	.suspend = usb_stor_suspend,
	.resume = usb_stor_resume,
	.reset_resume = usb_stor_reset_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = cypress_usb_ids,
	.soft_unbind = 1,	/* allow disconnect while commands pending */
	.no_dynamic_id = 1,	/* ids are fixed; no sysfs new_id */
};
module_usb_driver(cypress_driver);
| gpl-2.0 |
Ander-Alvarez/android_kernel_motorola_msm8226 | arch/s390/kvm/sigp.c | 4466 | 10740 | /*
 * sigp.c - handling interprocessor communication
*
* Copyright IBM Corp. 2008,2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
* Christian Ehrhardt <ehrhardt@de.ibm.com>
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include "gaccess.h"
#include "kvm-s390.h"
/* sigp order codes */
#define SIGP_SENSE 0x01
#define SIGP_EXTERNAL_CALL 0x02
#define SIGP_EMERGENCY 0x03
#define SIGP_START 0x04
#define SIGP_STOP 0x05
#define SIGP_RESTART 0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET 0x0c
#define SIGP_SET_PREFIX 0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH 0x12
#define SIGP_SENSE_RUNNING 0x15
/* cpu status bits */
#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
#define SIGP_STAT_NOT_RUNNING 0x00000400UL
#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
#define SIGP_STAT_STOPPED 0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
#define SIGP_STAT_CHECK_STOP 0x00000010UL
#define SIGP_STAT_INOPERATIVE 0x00000004UL
#define SIGP_STAT_INVALID_ORDER 0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
/*
 * Handle the SIGP SENSE order: look up the addressed vcpu and store its
 * status bits in the low 32 bits of *reg.
 *
 * Returns the SIGP condition code: 1 (status stored) when the target
 * exists, 3 (not operational) when @cpu_addr is out of range or no vcpu
 * is registered at that address.
 */
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	/* fi->lock protects the local_int[] table against hotplug */
	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = 3; /* not operational */
	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
		   & CPUSTAT_STOPPED)) {
		/* target is running: store an all-clear status word */
		*reg &= 0xffffffff00000000UL;
		rc = 1; /* status stored */
	} else {
		/* target is stopped: report the stopped bit */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_STOPPED;
		rc = 1; /* status stored */
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}
/*
 * Handle the SIGP EMERGENCY SIGNAL order: queue an emergency external
 * interrupt (tagged with the sender's vcpu id) on the target vcpu's local
 * interrupt list and wake it if it is waiting.
 *
 * Returns 0 (order accepted), 3 (not operational) if the target does not
 * exist, or -ENOMEM if the interrupt record could not be allocated.
 */
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;	/* who signalled */

	/* fi->lock pins local_int[cpu_addr]; li->lock protects its list */
	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}
/*
 * Handle the SIGP EXTERNAL CALL order: queue an external-call interrupt
 * (tagged with the sender's vcpu id) on the target vcpu's local interrupt
 * list and wake it if it is waiting.  Structure mirrors __sigp_emergency,
 * only the interrupt type differs.
 *
 * Returns 0 (order accepted), 3 (not operational) if the target does not
 * exist, or -ENOMEM on allocation failure.
 */
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;	/* who is calling */

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}
/*
 * Queue a SIGP stop interrupt on @li and record the requested stop
 * @action bits (ACTION_STOP_ON_STOP and/or ACTION_STORE_ON_STOP) so the
 * vcpu acts on them when it processes the interrupt.  If the target is
 * already stopped the order is a no-op but still reported as accepted.
 *
 * Returns 0 (order accepted) or -ENOMEM on allocation failure.
 */
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		/* already stopped: nothing to queue; free the record
		 * instead of leaking it (original code skipped this) */
		kfree(inti);
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
out:
	spin_unlock_bh(&li->lock);
	return 0; /* order accepted */
}
/*
 * Handle the SIGP STOP / STOP AND STORE STATUS orders: look up the target
 * vcpu's local interrupt struct and inject a stop interrupt with the given
 * @action bits.
 *
 * Returns the result of __inject_sigp_stop(), or 3 (not operational) when
 * @cpu_addr is out of range or has no vcpu.
 */
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		goto unlock;
	}
	rc = __inject_sigp_stop(li, action);
unlock:
	spin_unlock(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
	return rc;
}
/* Inject a SIGP stop with @action bits on this vcpu's own local list. */
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
{
	return __inject_sigp_stop(&vcpu->arch.local_int, action);
}
/*
 * Handle the SIGP SET ARCHITECTURE order.  Mode 0 is reported as not
 * operational, modes 1 and 2 are accepted, anything else is unsupported.
 */
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	u32 mode = parameter & 0xff;

	if (mode == 0)
		return 3; /* not operational */
	if (mode == 1 || mode == 2)
		return 0; /* order accepted */
	return -EOPNOTSUPP;
}
/*
 * Handle the SIGP SET PREFIX order: validate the new prefix area (both of
 * its pages must be readable guest memory), then queue a set-prefix
 * interrupt on the (stopped) target vcpu.
 *
 * On "incorrect state" / "invalid parameter" results the corresponding
 * status bit is stored in the low 32 bits of *reg.  The original code did
 * "*reg &= SIGP_STAT_INCORRECT_STATE", which *clears* everything except
 * that bit instead of storing it; fixed below to clear the low word and
 * OR the status bit in, matching the SIGP status-stored convention used
 * elsewhere in this file.
 *
 * Returns 0 (accepted), 1 (status stored) or 2 (busy).
 */
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_INVALID_PARAMETER;
		return 1; /* invalid parameter */
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return 2; /* busy */

	spin_lock(&fi->lock);
	if (cpu_addr < KVM_MAX_VCPUS)
		li = fi->local_int[cpu_addr];

	if (li == NULL) {
		rc = 1; /* incorrect state */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_fi;
	}

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		rc = 1; /* incorrect state */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STAT_INCORRECT_STATE;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	rc = 0; /* order accepted */

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
out_fi:
	spin_unlock(&fi->lock);
	return rc;
}
/*
 * Handle the SIGP SENSE RUNNING STATUS order.
 *
 * Per the SIGP architecture, a running target yields condition code 0
 * (order accepted) while a non-running target stores the not-running
 * status bit and yields condition code 1 (status stored).  The original
 * code had these two condition codes swapped; fixed below (matches the
 * upstream kernel fix).
 *
 * Returns 0 (running), 1 (status stored: not running) or 3 (not
 * operational).
 */
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	int rc;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	if (fi->local_int[cpu_addr] == NULL)
		rc = 3; /* not operational */
	else {
		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
		    & CPUSTAT_RUNNING) {
			/* running */
			rc = 0;
		} else {
			/* not running */
			*reg &= 0xffffffff00000000UL;
			*reg |= SIGP_STAT_NOT_RUNNING;
			rc = 1;
		}
	}
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);
	return rc;
}
/*
 * Handle the SIGP RESTART order.  Restart itself must be completed in
 * userspace (the caller returns -EOPNOTSUPP for it); here we only check
 * that the target exists and is not in the middle of a stop, reporting
 * busy in that case.
 *
 * Returns 0 (hand to userspace), 2 (busy) or 3 (not operational).
 */
static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	int rc = 0;
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		goto out;
	}

	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = 2; /* busy */
	else
		VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
			   cpu_addr);
	spin_unlock_bh(&li->lock);
out:
	spin_unlock(&fi->lock);
	return rc;
}
/*
 * Intercept handler for the SIGP instruction: decode the order code and
 * parameter from the guest's instruction fields and registers, dispatch to
 * the per-order helper, and fold the resulting condition code into the
 * guest PSW.
 *
 * Returns 0 on success (cc set), a negative error to propagate, or
 * -EOPNOTSUPP for orders that must be completed in userspace.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	/* instruction fields: R1 (status/parameter reg pair), R3 (cpu addr),
	 * B2/D2 (order code) */
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];	/* low 16 bits only */
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	order_code = disp2;
	if (base2)
		order_code += vcpu->run->s.regs.gprs[base2];

	/* the parameter lives in the odd register of the R1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_SET_ARCH:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == 2) /* busy */
			break;
		/* user space must know about restart */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* store the condition code (low two bits of rc) into the PSW */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}
| gpl-2.0 |
invisiblek/android_kernel_lge_vs450pp | drivers/mtd/maps/plat-ram.c | 4978 | 6435 | /* drivers/mtd/maps/plat-ram.c
*
* (c) 2004-2005 Simtec Electronics
* http://www.simtec.co.uk/products/SWLINUX/
* Ben Dooks <ben@simtec.co.uk>
*
* Generic platform device based RAM map
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/plat-ram.h>
#include <asm/io.h>
/* private structure for each mtd platform ram device created */
struct platram_info {
	struct device		*dev;	/* owning platform device's struct device */
	struct mtd_info		*mtd;	/* mtd device probed on top of the mapping */
	struct map_info		 map;	/* physical/virtual map description */
	struct resource		*area;	/* claimed memory region, NULL if none */
	struct platdata_mtd_ram	*pdata;	/* board-supplied platform data */
};
/* to_platram_info()
*
* device private data to struct platram_info conversion
*/
/* Fetch the driver-private platram_info stashed in the platform device. */
static inline struct platram_info *to_platram_info(struct platform_device *dev)
{
	return platform_get_drvdata(dev);
}
/* platram_setrw
*
* call the platform device's set rw/ro control
*
* to = 0 => read-only
* = 1 => read-write
*/
static inline void platram_setrw(struct platram_info *info, int to)
{
if (info->pdata == NULL)
return;
if (info->pdata->set_rw != NULL)
(info->pdata->set_rw)(info->dev, to);
}
/* platram_remove
*
* called to remove the device from the driver's control
*/
/* platram_remove
 *
 * called to remove the device from the driver's control
 *
 * Tears down everything platram_probe() set up, in reverse order:
 * mtd registration, RAM write-enable, memory region, ioremap mapping
 * and finally the private info struct.  Also used by platram_probe()
 * as its error-path cleanup, so every step tolerates partially
 * initialised state.
 */
static int platram_remove(struct platform_device *pdev)
{
	struct platram_info *info = to_platram_info(pdev);

	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "removing device\n");

	if (info == NULL)
		return 0;

	if (info->mtd) {
		mtd_device_unregister(info->mtd);
		map_destroy(info->mtd);
	}

	/* ensure ram is left read-only */

	platram_setrw(info, PLATRAM_RO);

	/* release resources */

	if (info->area) {
		release_resource(info->area);
		kfree(info->area);
	}

	if (info->map.virt != NULL)
		iounmap(info->map.virt);

	kfree(info);

	return 0;
}
/* platram_probe
*
* called from device drive system when a device matching our
* driver is found.
*/
/* platram_probe
 *
 * called from device drive system when a device matching our
 * driver is found.
 *
 * Builds a map_info from the platform resource, probes an MTD map
 * driver on top of it (either the pdata-supplied probe list or
 * "map_ram"), and registers the resulting mtd device.  On any
 * failure after the info struct exists, platram_remove() performs
 * the cleanup.
 */
static int platram_probe(struct platform_device *pdev)
{
	struct platdata_mtd_ram	*pdata;
	struct platram_info *info;
	struct resource *res;
	int err = 0;

	dev_dbg(&pdev->dev, "probe entered\n");

	if (pdev->dev.platform_data == NULL) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		err = -ENOENT;
		goto exit_error;
	}

	pdata = pdev->dev.platform_data;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL) {
		dev_err(&pdev->dev, "no memory for flash info\n");
		err = -ENOMEM;
		goto exit_error;
	}

	platform_set_drvdata(pdev, info);

	info->dev = &pdev->dev;
	info->pdata = pdata;

	/* get the resource for the memory mapping */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (res == NULL) {
		dev_err(&pdev->dev, "no memory resource specified\n");
		err = -ENOENT;
		goto exit_free;
	}

	dev_dbg(&pdev->dev, "got platform resource %p (0x%llx)\n", res,
		(unsigned long long)res->start);

	/* setup map parameters */

	info->map.phys = res->start;
	info->map.size = resource_size(res);
	info->map.name = pdata->mapname != NULL ?
			(char *)pdata->mapname : (char *)pdev->name;
	info->map.bankwidth = pdata->bankwidth;

	/* register our usage of the memory area */

	info->area = request_mem_region(res->start, info->map.size, pdev->name);
	if (info->area == NULL) {
		dev_err(&pdev->dev, "failed to request memory region\n");
		err = -EIO;
		goto exit_free;
	}

	/* remap the memory area */

	info->map.virt = ioremap(res->start, info->map.size);
	dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);

	if (info->map.virt == NULL) {
		dev_err(&pdev->dev, "failed to ioremap() region\n");
		err = -EIO;
		goto exit_free;
	}

	simple_map_init(&info->map);

	dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");

	/* probe for the right mtd map driver
	 * supplied by the platform_data struct */

	if (pdata->map_probes) {
		const char **map_probes = pdata->map_probes;

		for ( ; !info->mtd && *map_probes; map_probes++)
			info->mtd = do_map_probe(*map_probes , &info->map);
	}
	/* fallback to map_ram */
	else
		info->mtd = do_map_probe("map_ram", &info->map);

	if (info->mtd == NULL) {
		dev_err(&pdev->dev, "failed to probe for map_ram\n");
		err = -ENOMEM;
		goto exit_free;
	}

	info->mtd->owner = THIS_MODULE;
	info->mtd->dev.parent = &pdev->dev;
	platram_setrw(info, PLATRAM_RW);

	/* check to see if there are any available partitions, or whether
	 * to add this device whole */

	err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
					pdata->partitions,
					pdata->nr_partitions);
	if (!err)
		dev_info(&pdev->dev, "registered mtd device\n");

	/* NOTE(review): registering the whole device *again* when static
	 * partitions were supplied looks like a double registration —
	 * mtd_device_parse_register() above already handled them.
	 * Verify against the upstream plat-ram.c before changing. */
	if (pdata->nr_partitions) {
		/* add the whole device. */
		err = mtd_device_register(info->mtd, NULL, 0);
		if (err) {
			dev_err(&pdev->dev,
				"failed to register the entire device\n");
		}
	}

	return err;

 exit_free:
	platram_remove(pdev);
 exit_error:
	return err;
}
/* device driver info */
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:mtd-ram");
static struct platform_driver platram_driver = {
.probe = platram_probe,
.remove = platram_remove,
.driver = {
.name = "mtd-ram",
.owner = THIS_MODULE,
},
};
/* module init/exit */
/* Module init: announce the driver and register it with the platform bus. */
static int __init platram_init(void)
{
	/* give the banner an explicit log level instead of the implicit
	 * default (kernel printk convention) */
	printk(KERN_INFO "Generic platform RAM MTD, (c) 2004 Simtec Electronics\n");
	return platform_driver_register(&platram_driver);
}
/* Module exit: unregister the platform driver (remove runs per device). */
static void __exit platram_exit(void)
{
	platform_driver_unregister(&platram_driver);
}
module_init(platram_init);
module_exit(platram_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("MTD platform RAM map driver");
| gpl-2.0 |
TeamExodus/kernel_yu_tomato | arch/openrisc/kernel/or32_ksyms.c | 9586 | 1349 | /*
* OpenRISC or32_ksyms.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/semaphore.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
/* compiler generated symbols */
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__divsi3);
DECLARE_EXPORT(__umodsi3);
DECLARE_EXPORT(__modsi3);
DECLARE_EXPORT(__muldi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
EXPORT_SYMBOL(__copy_tofrom_user);
| gpl-2.0 |
nikhil18/lightning-kernel-CAF | arch/sh/kernel/module.c | 9842 | 3741 | /* Kernel module help for SH.
SHcompact version by Kaz Kojima and Paul Mundt.
SHmedia bits:
Copyright 2004 SuperH (UK) Ltd
Author: Richard Curnow
Based on the sh version, and on code from the sh64-specific parts of
modutils, originally written by Richard Curnow and Ben Gaster.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <asm/dwarf.h>
/*
 * Apply one RELA relocation section to a module being loaded (SH port).
 *
 * For each entry, the target address is computed from the section being
 * patched plus r_offset, and the relocation value from the resolved
 * symbol plus the addend.  Unaligned 32-bit fields are accessed via
 * get_unaligned()/put_unaligned(); the IMM_* cases patch a 16-bit
 * immediate in bits 10..25 of an SHmedia instruction word.
 *
 * Returns 0 on success or -ENOEXEC on an unknown relocation type.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Addr relocation;
	uint32_t *location;
	uint32_t value;

	pr_debug("Applying relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);
		relocation = sym->st_value + rel[i].r_addend;

#ifdef CONFIG_SUPERH64
		/* For text addresses, bit2 of the st_other field indicates
		 * whether the symbol is SHmedia (1) or SHcompact (0).  If
		 * SHmedia, the LSB of the symbol needs to be asserted
		 * for the CPU to be in SHmedia mode when it starts executing
		 * the branch target. */
		relocation |= !!(sym->st_other & 4);
#endif

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_SH_NONE:
			break;
		case R_SH_DIR32:
			/* absolute 32-bit: add relocation to field */
			value = get_unaligned(location);
			value += relocation;
			put_unaligned(value, location);
			break;
		case R_SH_REL32:
			/* pc-relative 32-bit */
			relocation = (relocation - (Elf32_Addr) location);
			value = get_unaligned(location);
			value += relocation;
			put_unaligned(value, location);
			break;
		case R_SH_IMM_LOW16:
			/* low 16 bits into the insn immediate field */
			*location = (*location & ~0x3fffc00) |
				((relocation & 0xffff) << 10);
			break;
		case R_SH_IMM_MEDLOW16:
			/* bits 16..31 into the insn immediate field */
			*location = (*location & ~0x3fffc00) |
				(((relocation >> 16) & 0xffff) << 10);
			break;
		case R_SH_IMM_LOW16_PCREL:
			relocation -= (Elf32_Addr) location;
			*location = (*location & ~0x3fffc00) |
				((relocation & 0xffff) << 10);
			break;
		case R_SH_IMM_MEDLOW16_PCREL:
			relocation -= (Elf32_Addr) location;
			*location = (*location & ~0x3fffc00) |
				(((relocation >> 16) & 0xffff) << 10);
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
/* Arch hook run after relocation: register the module's DWARF unwind info. */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	return module_dwarf_finalize(hdr, sechdrs, me);
}
/* Arch hook on module unload: release the module's DWARF unwind tables. */
void module_arch_cleanup(struct module *mod)
{
	module_dwarf_cleanup(mod);
}
| gpl-2.0 |
VanirAOSP/kernel_htc_m7 | arch/x86/mm/setup_nx.c | 10098 | 1310 | #include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
static int disable_nx __cpuinitdata;
/*
* noexec = on|off
*
* Control non-executable mappings for processes.
*
* on Enable
* off Disable
*/
/*
 * Parse the "noexec=on|off" kernel command-line option, record the choice
 * in disable_nx and apply it to the boot CPU immediately.  Unrecognised
 * values leave disable_nx unchanged.
 */
static int __init noexec_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		disable_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		disable_nx = 1;
	}
	x86_configure_nx();
	return 0;
}
/*
 * Apply the NX policy to the supported PTE mask: include _PAGE_NX only
 * when the CPU supports NX and it was not disabled on the command line.
 */
void __cpuinit x86_configure_nx(void)
{
	bool nx_enabled = cpu_has_nx && !disable_nx;

	if (nx_enabled)
		__supported_pte_mask |= _PAGE_NX;
	else
		__supported_pte_mask &= ~_PAGE_NX;
}
/*
 * Print a one-shot boot message describing the effective NX state:
 * missing CPU support, disabled by "noexec=off", active, or unavailable
 * on a 32-bit non-PAE kernel.
 */
void __init x86_report_nx(void)
{
	if (!cpu_has_nx) {
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "missing in CPU!\n");
	} else {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
		if (disable_nx) {
			printk(KERN_INFO "NX (Execute Disable) protection: "
			       "disabled by kernel command line option\n");
		} else {
			printk(KERN_INFO "NX (Execute Disable) protection: "
			       "active\n");
		}
#else
		/* 32bit non-PAE kernel, NX cannot be used */
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "cannot be enabled: non-PAE kernel!\n");
#endif
	}
}
| gpl-2.0 |
BrickedGrouperCandy/kernel_asus_grouper | arch/avr32/boards/atstk1000/setup.c | 11634 | 3511 | /*
* ATSTK1000 board-specific setup code.
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bootmem.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/linkage.h>
#include <video/atmel_lcdc.h>
#include <asm/setup.h>
#include <mach/at32ap700x.h>
#include <mach/board.h>
#include <mach/portmux.h>
#include "atstk1000.h"
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
static struct fb_videomode __initdata ltv350qv_modes[] = {
{
.name = "320x240 @ 75",
.refresh = 75,
.xres = 320, .yres = 240,
.pixclock = KHZ2PICOS(6891),
.left_margin = 17, .right_margin = 33,
.upper_margin = 10, .lower_margin = 10,
.hsync_len = 16, .vsync_len = 1,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
},
};
static struct fb_monspecs __initdata atstk1000_default_monspecs = {
.manufacturer = "SNG",
.monitor = "LTV350QV",
.modedb = ltv350qv_modes,
.modedb_len = ARRAY_SIZE(ltv350qv_modes),
.hfmin = 14820,
.hfmax = 22230,
.vfmin = 60,
.vfmax = 90,
.dclkmax = 30000000,
};
struct atmel_lcdfb_info __initdata atstk1000_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
| ATMEL_LCDC_INVCLK
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
| ATMEL_LCDC_MEMOR_BIG),
.default_monspecs = &atstk1000_default_monspecs,
.guard_time = 2,
};
#ifdef CONFIG_BOARD_ATSTK1000_J2_LED
#include <linux/leds.h>
static struct gpio_led stk1000_j2_led[] = {
#ifdef CONFIG_BOARD_ATSTK1000_J2_LED8
#define LEDSTRING "J2 jumpered to LED8"
{ .name = "led0:amber", .gpio = GPIO_PIN_PB( 8), },
{ .name = "led1:amber", .gpio = GPIO_PIN_PB( 9), },
{ .name = "led2:amber", .gpio = GPIO_PIN_PB(10), },
{ .name = "led3:amber", .gpio = GPIO_PIN_PB(13), },
{ .name = "led4:amber", .gpio = GPIO_PIN_PB(14), },
{ .name = "led5:amber", .gpio = GPIO_PIN_PB(15), },
{ .name = "led6:amber", .gpio = GPIO_PIN_PB(16), },
{ .name = "led7:amber", .gpio = GPIO_PIN_PB(30),
.default_trigger = "heartbeat", },
#else /* RGB */
#define LEDSTRING "J2 jumpered to RGB LEDs"
{ .name = "r1:red", .gpio = GPIO_PIN_PB( 8), },
{ .name = "g1:green", .gpio = GPIO_PIN_PB(10), },
{ .name = "b1:blue", .gpio = GPIO_PIN_PB(14), },
{ .name = "r2:red", .gpio = GPIO_PIN_PB( 9),
.default_trigger = "heartbeat", },
{ .name = "g2:green", .gpio = GPIO_PIN_PB(13), },
{ .name = "b2:blue", .gpio = GPIO_PIN_PB(15),
.default_trigger = "heartbeat", },
/* PB16, PB30 unused */
#endif
};
static struct gpio_led_platform_data stk1000_j2_led_data = {
.num_leds = ARRAY_SIZE(stk1000_j2_led),
.leds = stk1000_j2_led,
};
static struct platform_device stk1000_j2_led_dev = {
.name = "leds-gpio",
.id = 2, /* gpio block J2 */
.dev = {
.platform_data = &stk1000_j2_led_data,
},
};
/*
 * Claim the GPIO lines for the LEDs jumpered onto header J2 and register
 * the leds-gpio platform device that drives them.
 */
void __init atstk1000_setup_j2_leds(void)
{
	unsigned	i;

	for (i = 0; i < ARRAY_SIZE(stk1000_j2_led); i++)
		at32_select_gpio(stk1000_j2_led[i].gpio, AT32_GPIOF_OUTPUT);

	printk("STK1000: " LEDSTRING "\n");
	platform_device_register(&stk1000_j2_led_dev);
}
#else /* CONFIG_BOARD_ATSTK1000_J2_LED */
void __init atstk1000_setup_j2_leds(void)
{
}
#endif /* CONFIG_BOARD_ATSTK1000_J2_LED */
| gpl-2.0 |
faux123/glacier-kernel-redux | arch/avr32/boards/atstk1000/setup.c | 11634 | 3511 | /*
* ATSTK1000 board-specific setup code.
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bootmem.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/linkage.h>
#include <video/atmel_lcdc.h>
#include <asm/setup.h>
#include <mach/at32ap700x.h>
#include <mach/board.h>
#include <mach/portmux.h>
#include "atstk1000.h"
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
static struct fb_videomode __initdata ltv350qv_modes[] = {
{
.name = "320x240 @ 75",
.refresh = 75,
.xres = 320, .yres = 240,
.pixclock = KHZ2PICOS(6891),
.left_margin = 17, .right_margin = 33,
.upper_margin = 10, .lower_margin = 10,
.hsync_len = 16, .vsync_len = 1,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
},
};
static struct fb_monspecs __initdata atstk1000_default_monspecs = {
.manufacturer = "SNG",
.monitor = "LTV350QV",
.modedb = ltv350qv_modes,
.modedb_len = ARRAY_SIZE(ltv350qv_modes),
.hfmin = 14820,
.hfmax = 22230,
.vfmin = 60,
.vfmax = 90,
.dclkmax = 30000000,
};
struct atmel_lcdfb_info __initdata atstk1000_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
| ATMEL_LCDC_INVCLK
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
| ATMEL_LCDC_MEMOR_BIG),
.default_monspecs = &atstk1000_default_monspecs,
.guard_time = 2,
};
#ifdef CONFIG_BOARD_ATSTK1000_J2_LED
#include <linux/leds.h>
static struct gpio_led stk1000_j2_led[] = {
#ifdef CONFIG_BOARD_ATSTK1000_J2_LED8
#define LEDSTRING "J2 jumpered to LED8"
{ .name = "led0:amber", .gpio = GPIO_PIN_PB( 8), },
{ .name = "led1:amber", .gpio = GPIO_PIN_PB( 9), },
{ .name = "led2:amber", .gpio = GPIO_PIN_PB(10), },
{ .name = "led3:amber", .gpio = GPIO_PIN_PB(13), },
{ .name = "led4:amber", .gpio = GPIO_PIN_PB(14), },
{ .name = "led5:amber", .gpio = GPIO_PIN_PB(15), },
{ .name = "led6:amber", .gpio = GPIO_PIN_PB(16), },
{ .name = "led7:amber", .gpio = GPIO_PIN_PB(30),
.default_trigger = "heartbeat", },
#else /* RGB */
#define LEDSTRING "J2 jumpered to RGB LEDs"
{ .name = "r1:red", .gpio = GPIO_PIN_PB( 8), },
{ .name = "g1:green", .gpio = GPIO_PIN_PB(10), },
{ .name = "b1:blue", .gpio = GPIO_PIN_PB(14), },
{ .name = "r2:red", .gpio = GPIO_PIN_PB( 9),
.default_trigger = "heartbeat", },
{ .name = "g2:green", .gpio = GPIO_PIN_PB(13), },
{ .name = "b2:blue", .gpio = GPIO_PIN_PB(15),
.default_trigger = "heartbeat", },
/* PB16, PB30 unused */
#endif
};
static struct gpio_led_platform_data stk1000_j2_led_data = {
.num_leds = ARRAY_SIZE(stk1000_j2_led),
.leds = stk1000_j2_led,
};
static struct platform_device stk1000_j2_led_dev = {
.name = "leds-gpio",
.id = 2, /* gpio block J2 */
.dev = {
.platform_data = &stk1000_j2_led_data,
},
};
/*
 * Claim the GPIO lines for the LEDs jumpered onto header J2 and register
 * the leds-gpio platform device that drives them.
 */
void __init atstk1000_setup_j2_leds(void)
{
	unsigned	i;

	for (i = 0; i < ARRAY_SIZE(stk1000_j2_led); i++)
		at32_select_gpio(stk1000_j2_led[i].gpio, AT32_GPIOF_OUTPUT);

	printk("STK1000: " LEDSTRING "\n");
	platform_device_register(&stk1000_j2_led_dev);
}
#else /* CONFIG_BOARD_ATSTK1000_J2_LED */
void __init atstk1000_setup_j2_leds(void)
{
}
#endif /* CONFIG_BOARD_ATSTK1000_J2_LED */
| gpl-2.0 |
pio-masaki/CM10.1_kernel_tostab03 | fs/ntfs/collate.c | 14962 | 3675 | /*
* collate.c - NTFS kernel collation handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2004 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "collate.h"
#include "debug.h"
#include "ntfs.h"
/*
 * ntfs_collate_binary - byte-wise collation of two data items
 *
 * Compare @data1 and @data2 as raw bytes over their common length; when
 * one is a prefix of the other, the shorter item collates first.
 * Returns a negative value, zero or a positive value (memcmp semantics,
 * with -1/1 for the prefix case).
 */
static int ntfs_collate_binary(ntfs_volume *vol,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int cmp;

	ntfs_debug("Entering.");
	cmp = memcmp(data1, data2, min(data1_len, data2_len));
	if (!cmp && data1_len != data2_len)
		cmp = (data1_len < data2_len) ? -1 : 1;
	ntfs_debug("Done, returning %i", cmp);
	return cmp;
}
/*
 * ntfs_collate_ntofs_ulong - collate two 4-byte little-endian values
 *
 * Both items must be exactly four bytes long (enforced with BUG_ON for
 * now); they are converted from little-endian and compared numerically.
 * Returns -1, 0 or 1.
 */
static int ntfs_collate_ntofs_ulong(ntfs_volume *vol,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
	int rc;
	u32 d1, d2;

	ntfs_debug("Entering.");
	// FIXME:  We don't really want to bug here.
	BUG_ON(data1_len != data2_len);
	BUG_ON(data1_len != 4);
	d1 = le32_to_cpup(data1);
	d2 = le32_to_cpup(data2);
	if (d1 < d2)
		rc = -1;
	else {
		if (d1 == d2)
			rc = 0;
		else
			rc = 1;
	}
	ntfs_debug("Done, returning %i", rc);
	return rc;
}
/* Signature shared by all collation functions. */
typedef int (*ntfs_collate_func_t)(ntfs_volume *, const void *, const int,
		const void *, const int);

/* Dispatch table for collation rules 0x00-0x02; only binary is implemented. */
static ntfs_collate_func_t ntfs_do_collate0x0[3] = {
	ntfs_collate_binary,
	NULL/*ntfs_collate_file_name*/,
	NULL/*ntfs_collate_unicode_string*/,
};

/* Dispatch table for collation rules 0x10-0x13; only ntofs_ulong is implemented. */
static ntfs_collate_func_t ntfs_do_collate0x1[4] = {
	ntfs_collate_ntofs_ulong,
	NULL/*ntfs_collate_ntofs_sid*/,
	NULL/*ntfs_collate_ntofs_security_hash*/,
	NULL/*ntfs_collate_ntofs_ulongs*/,
};
/**
* ntfs_collate - collate two data items using a specified collation rule
* @vol: ntfs volume to which the data items belong
* @cr: collation rule to use when comparing the items
* @data1: first data item to collate
* @data1_len: length in bytes of @data1
* @data2: second data item to collate
* @data2_len: length in bytes of @data2
*
* Collate the two data items @data1 and @data2 using the collation rule @cr
 * and return -1, 0, or 1 if @data1 is found, respectively, to collate before,
* to match, or to collate after @data2.
*
* For speed we use the collation rule @cr as an index into two tables of
* function pointers to call the appropriate collation function.
*/
int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
		const void *data1, const int data1_len,
		const void *data2, const int data2_len) {
	int i;

	ntfs_debug("Entering.");
	/*
	 * FIXME: At the moment we only support COLLATION_BINARY and
	 * COLLATION_NTOFS_ULONG, so we BUG() for everything else for now.
	 */
	BUG_ON(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG);
	/* The on-disk rule value doubles as the table index. */
	i = le32_to_cpu(cr);
	BUG_ON(i < 0);
	if (i <= 0x02)
		/* Rules 0x00-0x02 live in the first dispatch table. */
		return ntfs_do_collate0x0[i](vol, data1, data1_len,
				data2, data2_len);
	BUG_ON(i < 0x10);
	/* Rules 0x10-0x13 live in the second table, offset by 0x10. */
	i -= 0x10;
	if (likely(i <= 3))
		return ntfs_do_collate0x1[i](vol, data1, data1_len,
				data2, data2_len);
	BUG();	/* unreachable: the rule check above already filtered */
	return 0;
}
| gpl-2.0 |
amitbagaria/samsung-kernel-latona | arch/arm/mach-at91/board-csb637.c | 1651 | 3642 | /*
* linux/arch/arm/mach-at91/board-csb637.c
*
* Copyright (C) 2005 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include "generic.h"
/* Early SoC setup: clocks from the board crystal and the debug console. */
static void __init csb637_map_io(void)
{
	/* Initialize processor: 3.6864 MHz crystal */
	at91rm9200_initialize(3686400, AT91RM9200_BGA);

	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* make console=ttyS0 (ie, DBGU) the default */
	at91_set_serial_console(0);
}
/* Set up the AT91RM9200 interrupt controller with default IRQ priorities. */
static void __init csb637_init_irq(void)
{
	at91rm9200_init_interrupts(NULL);
}
/* Ethernet: PHY interrupt wired to PC0; board uses MII (not RMII). */
static struct at91_eth_data __initdata csb637_eth_data = {
	.phy_irq_pin	= AT91_PIN_PC0,
	.is_rmii	= 0,
};

/* USB host: both OHCI ports are brought out. */
static struct at91_usbh_data __initdata csb637_usbh_data = {
	.ports		= 2,
};

/* USB device: VBUS sense on PB28, D+ pull-up control on PB1. */
static struct at91_udc_data __initdata csb637_udc_data = {
	.vbus_pin	= AT91_PIN_PB28,
	.pullup_pin	= AT91_PIN_PB1,
};

/* 16 MiB NOR flash mapped on chip-select 0. */
#define CSB_FLASH_BASE	AT91_CHIPSELECT_0
#define CSB_FLASH_SIZE	SZ_16M

/* Single partition covering the whole device, kept read-only. */
static struct mtd_partition csb_flash_partitions[] = {
	{
		.name		= "uMON flash",
		.offset		= 0,
		.size		= MTDPART_SIZ_FULL,
		.mask_flags	= MTD_WRITEABLE, /* read only */
	}
};

static struct physmap_flash_data csb_flash_data = {
	.width		= 2,	/* 16-bit data bus */
	.parts		= csb_flash_partitions,
	.nr_parts	= ARRAY_SIZE(csb_flash_partitions),
};

static struct resource csb_flash_resources[] = {
	{
		.start	= CSB_FLASH_BASE,
		.end	= CSB_FLASH_BASE + CSB_FLASH_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device csb_flash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &csb_flash_data,
	},
	.resource	= csb_flash_resources,
	.num_resources	= ARRAY_SIZE(csb_flash_resources),
};

/* One red LED ("d1"), active low, driven by the heartbeat trigger. */
static struct gpio_led csb_leds[] = {
	{	/* "d1", red */
		.name			= "d1",
		.gpio			= AT91_PIN_PB2,
		.active_low		= 1,
		.default_trigger	= "heartbeat",
	},
};
/* Register every on-board device; runs as the machine init hook. */
static void __init csb637_board_init(void)
{
	/* LED(s) */
	at91_gpio_leds(csb_leds, ARRAY_SIZE(csb_leds));
	/* Serial */
	at91_add_device_serial();
	/* Ethernet */
	at91_add_device_eth(&csb637_eth_data);
	/* USB Host */
	at91_add_device_usbh(&csb637_usbh_data);
	/* USB Device */
	at91_add_device_udc(&csb637_udc_data);
	/* I2C */
	at91_add_device_i2c(NULL, 0);
	/* SPI */
	at91_add_device_spi(NULL, 0);
	/* NOR flash */
	platform_device_register(&csb_flash);
}

/* Machine descriptor tying the hooks above to the CSB637 machine ID. */
MACHINE_START(CSB637, "Cogent CSB637")
	/* Maintainer: Bill Gatliff */
	.phys_io	= AT91_BASE_SYS,
	.io_pg_offst	= (AT91_VA_BASE_SYS >> 18) & 0xfffc,
	.boot_params	= AT91_SDRAM_BASE + 0x100,
	.timer		= &at91rm9200_timer,
	.map_io		= csb637_map_io,
	.init_irq	= csb637_init_irq,
	.init_machine	= csb637_board_init,
MACHINE_END
| gpl-2.0 |
friedrich420/N910G-AEL-Kernel-Lollipop-Sources | sound/soc/samsung/spdif.c | 1907 | 12385 | /* sound/soc/samsung/spdif.c
*
* ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
*
* Copyright (c) 2010 Samsung Electronics Co. Ltd
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <linux/platform_data/asoc-s3c.h>
#include <mach/dma.h>
#include "dma.h"
#include "spdif.h"
/* Registers */
#define CLKCON 0x00
#define CON 0x04
#define BSTAS 0x08
#define CSTAS 0x0C
#define DATA_OUTBUF 0x10
#define DCNT 0x14
#define BSTAS_S 0x18
#define DCNT_S 0x1C
#define CLKCTL_MASK 0x7
#define CLKCTL_MCLK_EXT (0x1 << 2)
#define CLKCTL_PWR_ON (0x1 << 0)
#define CON_MASK 0x3ffffff
#define CON_FIFO_TH_SHIFT 19
#define CON_FIFO_TH_MASK (0x7 << 19)
#define CON_USERDATA_23RDBIT (0x1 << 12)
#define CON_SW_RESET (0x1 << 5)
#define CON_MCLKDIV_MASK (0x3 << 3)
#define CON_MCLKDIV_256FS (0x0 << 3)
#define CON_MCLKDIV_384FS (0x1 << 3)
#define CON_MCLKDIV_512FS (0x2 << 3)
#define CON_PCM_MASK (0x3 << 1)
#define CON_PCM_16BIT (0x0 << 1)
#define CON_PCM_20BIT (0x1 << 1)
#define CON_PCM_24BIT (0x2 << 1)
#define CON_PCM_DATA (0x1 << 0)
#define CSTAS_MASK 0x3fffffff
#define CSTAS_SAMP_FREQ_MASK (0xF << 24)
#define CSTAS_SAMP_FREQ_44 (0x0 << 24)
#define CSTAS_SAMP_FREQ_48 (0x2 << 24)
#define CSTAS_SAMP_FREQ_32 (0x3 << 24)
#define CSTAS_SAMP_FREQ_96 (0xA << 24)
#define CSTAS_CATEGORY_MASK (0xFF << 8)
#define CSTAS_CATEGORY_CODE_CDP (0x01 << 8)
#define CSTAS_NO_COPYRIGHT (0x1 << 2)
/**
* struct samsung_spdif_info - Samsung S/PDIF Controller information
* @lock: Spin lock for S/PDIF.
* @dev: The parent device passed to use from the probe.
* @regs: The pointer to the device register block.
* @clk_rate: Current clock rate for calcurate ratio.
* @pclk: The peri-clock pointer for spdif master operation.
* @sclk: The source clock pointer for making sync signals.
* @save_clkcon: Backup clkcon reg. in suspend.
* @save_con: Backup con reg. in suspend.
* @save_cstas: Backup cstas reg. in suspend.
* @dma_playback: DMA information for playback channel.
*/
struct samsung_spdif_info {
spinlock_t lock;
struct device *dev;
void __iomem *regs;
unsigned long clk_rate;
struct clk *pclk;
struct clk *sclk;
u32 saved_clkcon;
u32 saved_con;
u32 saved_cstas;
struct s3c_dma_params *dma_playback;
};
static struct s3c2410_dma_client spdif_dma_client_out = {
.name = "S/PDIF Stereo out",
};
static struct s3c_dma_params spdif_stereo_out;
static struct samsung_spdif_info spdif_info;
/* Fetch the driver state that probe attached to the CPU DAI. */
static inline struct samsung_spdif_info *to_info(struct snd_soc_dai *cpu_dai)
{
	return snd_soc_dai_get_drvdata(cpu_dai);
}
/*
 * spdif_snd_txctrl - gate transmitter power via the CLKCON power bit.
 * @on: non-zero powers the block on, zero powers it off.
 *
 * Only the power bit is changed; the other defined CLKCON bits are kept.
 */
static void spdif_snd_txctrl(struct samsung_spdif_info *spdif, int on)
{
	void __iomem *regs = spdif->regs;
	u32 val;

	dev_dbg(spdif->dev, "Entered %s\n", __func__);

	val = readl(regs + CLKCON) & CLKCTL_MASK;
	if (on)
		val |= CLKCTL_PWR_ON;
	else
		val &= ~CLKCTL_PWR_ON;
	writel(val, regs + CLKCON);
}
/*
 * spdif_set_sysclk - select the MCLK source and cache its frequency.
 * @clk_id: SND_SOC_SPDIF_INT_MCLK selects the internal clock; any other
 *          value enables the external MCLK input.
 * @freq:   clock rate in Hz, saved for the ratio check in hw_params.
 */
static int spdif_set_sysclk(struct snd_soc_dai *cpu_dai,
		int clk_id, unsigned int freq, int dir)
{
	struct samsung_spdif_info *spdif = to_info(cpu_dai);
	u32 clkcon;

	dev_dbg(spdif->dev, "Entered %s\n", __func__);

	clkcon = readl(spdif->regs + CLKCON);
	if (clk_id == SND_SOC_SPDIF_INT_MCLK)
		clkcon &= ~CLKCTL_MCLK_EXT;
	else
		clkcon |= CLKCTL_MCLK_EXT;
	writel(clkcon, spdif->regs + CLKCON);

	spdif->clk_rate = freq;

	return 0;
}
/*
 * spdif_trigger - start/stop the transmitter on PCM trigger events.
 * Power transitions are done under the driver spinlock; unknown commands
 * return -EINVAL.
 */
static int spdif_trigger(struct snd_pcm_substream *substream, int cmd,
		struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
	unsigned long flags;

	dev_dbg(spdif->dev, "Entered %s\n", __func__);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		spin_lock_irqsave(&spdif->lock, flags);
		spdif_snd_txctrl(spdif, 1);
		spin_unlock_irqrestore(&spdif->lock, flags);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		spin_lock_irqsave(&spdif->lock, flags);
		spdif_snd_txctrl(spdif, 0);
		spin_unlock_irqrestore(&spdif->lock, flags);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* MCLK-to-sample-rate ratios supported by the MCLKDIV field. */
static int spdif_sysclk_ratios[] = {
	512, 384, 256,
};
static int spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *socdai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
void __iomem *regs = spdif->regs;
struct s3c_dma_params *dma_data;
u32 con, clkcon, cstas;
unsigned long flags;
int i, ratio;
dev_dbg(spdif->dev, "Entered %s\n", __func__);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dma_data = spdif->dma_playback;
else {
dev_err(spdif->dev, "Capture is not supported\n");
return -EINVAL;
}
snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
spin_lock_irqsave(&spdif->lock, flags);
con = readl(regs + CON) & CON_MASK;
cstas = readl(regs + CSTAS) & CSTAS_MASK;
clkcon = readl(regs + CLKCON) & CLKCTL_MASK;
con &= ~CON_FIFO_TH_MASK;
con |= (0x7 << CON_FIFO_TH_SHIFT);
con |= CON_USERDATA_23RDBIT;
con |= CON_PCM_DATA;
con &= ~CON_PCM_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
con |= CON_PCM_16BIT;
break;
default:
dev_err(spdif->dev, "Unsupported data size.\n");
goto err;
}
ratio = spdif->clk_rate / params_rate(params);
for (i = 0; i < ARRAY_SIZE(spdif_sysclk_ratios); i++)
if (ratio == spdif_sysclk_ratios[i])
break;
if (i == ARRAY_SIZE(spdif_sysclk_ratios)) {
dev_err(spdif->dev, "Invalid clock ratio %ld/%d\n",
spdif->clk_rate, params_rate(params));
goto err;
}
con &= ~CON_MCLKDIV_MASK;
switch (ratio) {
case 256:
con |= CON_MCLKDIV_256FS;
break;
case 384:
con |= CON_MCLKDIV_384FS;
break;
case 512:
con |= CON_MCLKDIV_512FS;
break;
}
cstas &= ~CSTAS_SAMP_FREQ_MASK;
switch (params_rate(params)) {
case 44100:
cstas |= CSTAS_SAMP_FREQ_44;
break;
case 48000:
cstas |= CSTAS_SAMP_FREQ_48;
break;
case 32000:
cstas |= CSTAS_SAMP_FREQ_32;
break;
case 96000:
cstas |= CSTAS_SAMP_FREQ_96;
break;
default:
dev_err(spdif->dev, "Invalid sampling rate %d\n",
params_rate(params));
goto err;
}
cstas &= ~CSTAS_CATEGORY_MASK;
cstas |= CSTAS_CATEGORY_CODE_CDP;
cstas |= CSTAS_NO_COPYRIGHT;
writel(con, regs + CON);
writel(cstas, regs + CSTAS);
writel(clkcon, regs + CLKCON);
spin_unlock_irqrestore(&spdif->lock, flags);
return 0;
err:
spin_unlock_irqrestore(&spdif->lock, flags);
return -EINVAL;
}
/* Soft-reset the controller and drop its power when the stream closes. */
static void spdif_shutdown(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
	void __iomem *regs = spdif->regs;
	u32 con, clkcon;

	dev_dbg(spdif->dev, "Entered %s\n", __func__);

	con = readl(regs + CON) & CON_MASK;
	clkcon = readl(regs + CLKCON) & CLKCTL_MASK;

	/* Pulse the software-reset bit, then clear the power bit. */
	writel(con | CON_SW_RESET, regs + CON);
	cpu_relax();

	writel(clkcon & ~CLKCTL_PWR_ON, regs + CLKCON);
}
#ifdef CONFIG_PM
/*
 * spdif_suspend - save CLKCON/CON/CSTAS and put the block in reset.
 *
 * NOTE(review): 'con' is captured from spdif->saved_con BEFORE the fresh
 * register values are saved below, so the reset write uses the value from
 * the previous suspend cycle, not the one just read — looks suspicious;
 * confirm against the hardware manual before changing.
 */
static int spdif_suspend(struct snd_soc_dai *cpu_dai)
{
	struct samsung_spdif_info *spdif = to_info(cpu_dai);
	u32 con = spdif->saved_con;

	dev_dbg(spdif->dev, "Entered %s\n", __func__);

	/* Snapshot the defined bits of each register for resume. */
	spdif->saved_clkcon = readl(spdif->regs + CLKCON) & CLKCTL_MASK;
	spdif->saved_con = readl(spdif->regs + CON) & CON_MASK;
	spdif->saved_cstas = readl(spdif->regs + CSTAS) & CSTAS_MASK;

	writel(con | CON_SW_RESET, spdif->regs + CON);
	cpu_relax();

	return 0;
}
/* Restore the register snapshot taken by spdif_suspend(). */
static int spdif_resume(struct snd_soc_dai *cpu_dai)
{
	struct samsung_spdif_info *spdif = to_info(cpu_dai);

	dev_dbg(spdif->dev, "Entered %s\n", __func__);

	writel(spdif->saved_clkcon, spdif->regs + CLKCON);
	writel(spdif->saved_con, spdif->regs + CON);
	writel(spdif->saved_cstas, spdif->regs + CSTAS);

	return 0;
}
#else
#define spdif_suspend NULL
#define spdif_resume NULL
#endif
/* DAI callbacks wired into the ASoC core. */
static const struct snd_soc_dai_ops spdif_dai_ops = {
	.set_sysclk	= spdif_set_sysclk,
	.trigger	= spdif_trigger,
	.hw_params	= spdif_hw_params,
	.shutdown	= spdif_shutdown,
};

/* Playback-only stereo DAI: 32/44.1/48/96 kHz, S16_LE samples. */
static struct snd_soc_dai_driver samsung_spdif_dai = {
	.name = "samsung-spdif",
	.playback = {
		.stream_name = "S/PDIF Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = (SNDRV_PCM_RATE_32000 |
				SNDRV_PCM_RATE_44100 |
				SNDRV_PCM_RATE_48000 |
				SNDRV_PCM_RATE_96000),
		.formats = SNDRV_PCM_FMTBIT_S16_LE, },
	.ops = &spdif_dai_ops,
	.suspend = spdif_suspend,
	.resume = spdif_resume,
};

static const struct snd_soc_component_driver samsung_spdif_component = {
	.name		= "samsung-spdif",
};
/*
 * spdif_probe - acquire resources, map registers and register the DAI.
 *
 * Acquisition order: DMA + MEM resources, optional GPIO setup, peri clock,
 * source clock, register region, ioremap, component, DMA platform.  On
 * failure the goto chain releases everything acquired so far in reverse.
 */
static int spdif_probe(struct platform_device *pdev)
{
	struct s3c_audio_pdata *spdif_pdata;
	struct resource *mem_res, *dma_res;
	struct samsung_spdif_info *spdif;
	int ret;

	spdif_pdata = pdev->dev.platform_data;

	dev_dbg(&pdev->dev, "Entered %s\n", __func__);

	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dma_res) {
		dev_err(&pdev->dev, "Unable to get dma resource.\n");
		return -ENXIO;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "Unable to get register resource.\n");
		return -ENXIO;
	}

	/* Board-specific pin mux, if the platform supplied one. */
	if (spdif_pdata && spdif_pdata->cfg_gpio
			&& spdif_pdata->cfg_gpio(pdev)) {
		dev_err(&pdev->dev, "Unable to configure GPIO pins\n");
		return -EINVAL;
	}

	/* Driver state is a file-scope singleton: one controller supported. */
	spdif = &spdif_info;
	spdif->dev = &pdev->dev;

	spin_lock_init(&spdif->lock);

	spdif->pclk = clk_get(&pdev->dev, "spdif");
	if (IS_ERR(spdif->pclk)) {
		dev_err(&pdev->dev, "failed to get peri-clock\n");
		ret = -ENOENT;
		goto err0;
	}
	clk_prepare_enable(spdif->pclk);

	spdif->sclk = clk_get(&pdev->dev, "sclk_spdif");
	if (IS_ERR(spdif->sclk)) {
		dev_err(&pdev->dev, "failed to get internal source clock\n");
		ret = -ENOENT;
		goto err1;
	}
	clk_prepare_enable(spdif->sclk);

	/* Request S/PDIF Register's memory region */
	if (!request_mem_region(mem_res->start,
			resource_size(mem_res), "samsung-spdif")) {
		dev_err(&pdev->dev, "Unable to request register region\n");
		ret = -EBUSY;
		goto err2;
	}

	spdif->regs = ioremap(mem_res->start, 0x100);
	if (spdif->regs == NULL) {
		dev_err(&pdev->dev, "Cannot ioremap registers\n");
		ret = -ENXIO;
		goto err3;
	}

	dev_set_drvdata(&pdev->dev, spdif);

	ret = snd_soc_register_component(&pdev->dev, &samsung_spdif_component,
					 &samsung_spdif_dai, 1);
	if (ret != 0) {
		dev_err(&pdev->dev, "fail to register dai\n");
		goto err4;
	}

	/* Describe the playback DMA channel (16-bit words into DATA_OUTBUF). */
	spdif_stereo_out.dma_size = 2;
	spdif_stereo_out.client = &spdif_dma_client_out;
	spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
	spdif_stereo_out.channel = dma_res->start;

	spdif->dma_playback = &spdif_stereo_out;

	ret = asoc_dma_platform_register(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
		goto err5;
	}

	return 0;
err5:
	snd_soc_unregister_component(&pdev->dev);
err4:
	iounmap(spdif->regs);
err3:
	release_mem_region(mem_res->start, resource_size(mem_res));
err2:
	clk_disable_unprepare(spdif->sclk);
	clk_put(spdif->sclk);
err1:
	clk_disable_unprepare(spdif->pclk);
	clk_put(spdif->pclk);
err0:
	return ret;
}
/* Undo spdif_probe: unregister, unmap, and release the region and clocks. */
static int spdif_remove(struct platform_device *pdev)
{
	struct samsung_spdif_info *spdif = &spdif_info;
	struct resource *mem_res;

	asoc_dma_platform_unregister(&pdev->dev);
	snd_soc_unregister_component(&pdev->dev);

	iounmap(spdif->regs);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res)
		release_mem_region(mem_res->start, resource_size(mem_res));

	clk_disable_unprepare(spdif->sclk);
	clk_put(spdif->sclk);
	clk_disable_unprepare(spdif->pclk);
	clk_put(spdif->pclk);

	return 0;
}
static struct platform_driver samsung_spdif_driver = {
.probe = spdif_probe,
.remove = spdif_remove,
.driver = {
.name = "samsung-spdif",
.owner = THIS_MODULE,
},
};
module_platform_driver(samsung_spdif_driver);
MODULE_AUTHOR("Seungwhan Youn, <sw.youn@samsung.com>");
MODULE_DESCRIPTION("Samsung S/PDIF Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:samsung-spdif");
| gpl-2.0 |
linxiaoji/kernel_cancro | drivers/usb/host/ehci-mem.c | 2163 | 6957 | /*
* Copyright (c) 2001 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* There's basically three types of memory:
* - data used only by the HCD ... kmalloc is fine
* - async and periodic schedules, shared by HC and HCD ... these
* need to use dma_pool or dma_alloc_coherent
* - driver buffers, read/written by HC ... single shot DMA mapped
*
* There's also "register" data (e.g. PCI or SOC), which is memory mapped.
* No memory seen by this driver is pageable.
*/
/*-------------------------------------------------------------------------*/
/* Allocate the key transfer structures from the previously allocated pool */
/* Reset a qTD to an inactive state with terminated link pointers. */
static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
				  dma_addr_t dma)
{
	memset (qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	/* Halted token keeps the HC from processing this qTD yet. */
	qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END(ehci);
	qtd->hw_alt_next = EHCI_LIST_END(ehci);
	INIT_LIST_HEAD (&qtd->qtd_list);
}
/*
 * Allocate one qTD from the DMA pool and initialize it; returns NULL if
 * the pool allocation fails.
 */
static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
	dma_addr_t dma;
	struct ehci_qtd *qtd = dma_pool_alloc(ehci->qtd_pool, flags, &dma);

	if (qtd)
		ehci_qtd_init(ehci, qtd, dma);
	return qtd;
}
/* Return a qTD to its DMA pool. */
static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma);
}
/*
 * Free a QH once its refcount drops to zero.  The QH must be unlinked
 * and have no queued qTDs (other than its dummy); anything else is a
 * driver bug, hence the BUG().
 */
static void qh_destroy(struct ehci_qh *qh)
{
	struct ehci_hcd *ehci = qh->ehci;

	/* clean qtds first, and know this is not linked */
	if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg (ehci, "unused qh not empty!\n");
		BUG ();
	}
	if (qh->dummy)
		ehci_qtd_free (ehci, qh->dummy);
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
	kfree(qh);
}
/*
 * Allocate a QH: the software shadow via kzalloc, the hardware part from
 * the DMA pool, plus a dummy qTD so URBs can be queued safely.  Returns
 * NULL on any allocation failure (partial allocations are rolled back).
 *
 * NOTE(review): the kzalloc uses GFP_ATOMIC rather than the caller's
 * 'flags' (which are still passed to the pool and dummy-qTD allocations)
 * — presumably deliberate, but worth confirming.
 */
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qh		*qh;
	dma_addr_t		dma;

	qh = kzalloc(sizeof *qh, GFP_ATOMIC);
	if (!qh)
		goto done;
	qh->hw = (struct ehci_qh_hw *)
		dma_pool_alloc(ehci->qh_pool, flags, &dma);
	if (!qh->hw)
		goto fail;
	memset(qh->hw, 0, sizeof *qh->hw);
	qh->refcount = 1;
	qh->ehci = ehci;
	qh->qh_dma = dma;
	// INIT_LIST_HEAD (&qh->qh_list);
	INIT_LIST_HEAD (&qh->qtd_list);

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc (ehci, flags);
	if (qh->dummy == NULL) {
		ehci_dbg (ehci, "no dummy td\n");
		goto fail1;
	}
done:
	return qh;
fail1:
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
	kfree(qh);
	return NULL;
}
/* to share a qh (cpu threads, or hc) */
/* Take an additional reference on a QH; warns if it was already dead. */
static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
{
	WARN_ON(!qh->refcount);
	qh->refcount++;
	return qh;
}
/* Drop a QH reference; frees the QH when the last reference goes away. */
static inline void qh_put (struct ehci_qh *qh)
{
	if (!--qh->refcount)
		qh_destroy(qh);
}
/*-------------------------------------------------------------------------*/
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
/*
 * Release everything ehci_mem_init() allocated: async/dummy QHs, all four
 * DMA pools, the hardware periodic table and its software shadow.  Safe to
 * call on a partially initialized ehci (every pointer is NULL-checked and
 * cleared afterwards).
 */
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
	free_cached_lists(ehci);
	if (ehci->async)
		qh_put (ehci->async);
	ehci->async = NULL;

	if (ehci->dummy)
		qh_put(ehci->dummy);
	ehci->dummy = NULL;

	/* DMA consistent memory and pools */
	if (ehci->qtd_pool)
		dma_pool_destroy (ehci->qtd_pool);
	ehci->qtd_pool = NULL;

	if (ehci->qh_pool) {
		dma_pool_destroy (ehci->qh_pool);
		ehci->qh_pool = NULL;
	}

	if (ehci->itd_pool)
		dma_pool_destroy (ehci->itd_pool);
	ehci->itd_pool = NULL;

	if (ehci->sitd_pool)
		dma_pool_destroy (ehci->sitd_pool);
	ehci->sitd_pool = NULL;

	if (ehci->periodic)
		dma_free_coherent (ehci_to_hcd(ehci)->self.controller,
			ehci->periodic_size * sizeof (u32),
			ehci->periodic, ehci->periodic_dma);
	ehci->periodic = NULL;

	/* shadow periodic table */
	kfree(ehci->pshadow);
	ehci->pshadow = NULL;
}
/* remember to add cleanup code (above) if you add anything here */
/*
 * Create all HC-shared memory: DMA pools for qTDs, QHs, iTDs and siTDs,
 * the async-schedule head QH, the hardware periodic frame table and its
 * software shadow.  Returns 0 or -ENOMEM; on failure everything allocated
 * so far is torn down via ehci_mem_cleanup().
 */
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
	int i;
	size_t align;

	/* Some controllers need 64-byte alignment for pool objects. */
	align = ((ehci->pool_64_bit_align) ? 64 : 32);

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create ("ehci_qtd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_qtd),
			align /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create ("ehci_qh",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_qh_hw),
			align /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc (ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create ("ehci_itd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create ("ehci_sitd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, 0);
	if (ehci->periodic == NULL) {
		goto fail;
	}

	if (ehci->use_dummy_qh) {
		/* Point every frame at an inactive dummy QH instead of the
		 * list terminator (silicon workaround path). */
		struct ehci_qh_hw	*hw;
		ehci->dummy = ehci_qh_alloc(ehci, flags);
		if (!ehci->dummy)
			goto fail;

		hw = ehci->dummy->hw;
		hw->hw_next = EHCI_LIST_END(ehci);
		hw->hw_qtd_next = EHCI_LIST_END(ehci);
		hw->hw_alt_next = EHCI_LIST_END(ehci);
		hw->hw_token &= ~QTD_STS_ACTIVE;
		ehci->dummy->hw = hw;

		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = ehci->dummy->qh_dma;
	} else {
		for (i = 0; i < ehci->periodic_size; i++)
			ehci->periodic[i] = EHCI_LIST_END(ehci);
	}

	/* software shadow of hardware table */
	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
	if (ehci->pshadow != NULL)
		return 0;

fail:
	ehci_dbg (ehci, "couldn't init memory\n");
	ehci_mem_cleanup (ehci);
	return -ENOMEM;
}
| gpl-2.0 |
ptmr3/Skyrocket_JB_Kernel | drivers/net/wireless/zd1211rw/zd_mac.c | 2419 | 39085 | /* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
* Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
* Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net>
* Copyright (C) 2007-2008 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/jiffies.h>
#include <net/ieee80211_radiotap.h>
#include "zd_def.h"
#include "zd_chip.h"
#include "zd_mac.h"
#include "zd_rf.h"
/* Pairs a ZD1211 EEPROM regulatory-domain code with an ISO 3166 alpha2. */
struct zd_reg_alpha2_map {
	u32 reg;
	char alpha2[2];
};

static struct zd_reg_alpha2_map reg_alpha2_map[] = {
	{ ZD_REGDOMAIN_FCC, "US" },
	{ ZD_REGDOMAIN_IC, "CA" },
	{ ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
	{ ZD_REGDOMAIN_JAPAN, "JP" },
	{ ZD_REGDOMAIN_JAPAN_2, "JP" },
	{ ZD_REGDOMAIN_JAPAN_3, "JP" },
	{ ZD_REGDOMAIN_SPAIN, "ES" },
	{ ZD_REGDOMAIN_FRANCE, "FR" },
};
/* This table contains the hardware specific values for the modulation rates. */
static const struct ieee80211_rate zd_rates[] = {
{ .bitrate = 10,
.hw_value = ZD_CCK_RATE_1M, },
{ .bitrate = 20,
.hw_value = ZD_CCK_RATE_2M,
.hw_value_short = ZD_CCK_RATE_2M | ZD_CCK_PREA_SHORT,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = ZD_CCK_RATE_5_5M,
.hw_value_short = ZD_CCK_RATE_5_5M | ZD_CCK_PREA_SHORT,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = ZD_CCK_RATE_11M,
.hw_value_short = ZD_CCK_RATE_11M | ZD_CCK_PREA_SHORT,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = ZD_OFDM_RATE_6M,
.flags = 0 },
{ .bitrate = 90,
.hw_value = ZD_OFDM_RATE_9M,
.flags = 0 },
{ .bitrate = 120,
.hw_value = ZD_OFDM_RATE_12M,
.flags = 0 },
{ .bitrate = 180,
.hw_value = ZD_OFDM_RATE_18M,
.flags = 0 },
{ .bitrate = 240,
.hw_value = ZD_OFDM_RATE_24M,
.flags = 0 },
{ .bitrate = 360,
.hw_value = ZD_OFDM_RATE_36M,
.flags = 0 },
{ .bitrate = 480,
.hw_value = ZD_OFDM_RATE_48M,
.flags = 0 },
{ .bitrate = 540,
.hw_value = ZD_OFDM_RATE_54M,
.flags = 0 },
};
/*
* Zydas retry rates table. Each line is listed in the same order as
* in zd_rates[] and contains all the rate used when a packet is sent
* starting with a given rates. Let's consider an example :
*
* "11 Mbits : 4, 3, 2, 1, 0" means :
* - packet is sent using 4 different rates
* - 1st rate is index 3 (ie 11 Mbits)
* - 2nd rate is index 2 (ie 5.5 Mbits)
* - 3rd rate is index 1 (ie 2 Mbits)
* - 4th rate is index 0 (ie 1 Mbits)
*/
static const struct tx_retry_rate zd_retry_rates[] = {
{ /* 1 Mbits */ 1, { 0 }},
{ /* 2 Mbits */ 2, { 1, 0 }},
{ /* 5.5 Mbits */ 3, { 2, 1, 0 }},
{ /* 11 Mbits */ 4, { 3, 2, 1, 0 }},
{ /* 6 Mbits */ 5, { 4, 3, 2, 1, 0 }},
{ /* 9 Mbits */ 6, { 5, 4, 3, 2, 1, 0}},
{ /* 12 Mbits */ 5, { 6, 3, 2, 1, 0 }},
{ /* 18 Mbits */ 6, { 7, 6, 3, 2, 1, 0 }},
{ /* 24 Mbits */ 6, { 8, 6, 3, 2, 1, 0 }},
{ /* 36 Mbits */ 7, { 9, 8, 6, 3, 2, 1, 0 }},
{ /* 48 Mbits */ 8, {10, 9, 8, 6, 3, 2, 1, 0 }},
{ /* 54 Mbits */ 9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }}
};
static const struct ieee80211_channel zd_channels[] = {
{ .center_freq = 2412, .hw_value = 1 },
{ .center_freq = 2417, .hw_value = 2 },
{ .center_freq = 2422, .hw_value = 3 },
{ .center_freq = 2427, .hw_value = 4 },
{ .center_freq = 2432, .hw_value = 5 },
{ .center_freq = 2437, .hw_value = 6 },
{ .center_freq = 2442, .hw_value = 7 },
{ .center_freq = 2447, .hw_value = 8 },
{ .center_freq = 2452, .hw_value = 9 },
{ .center_freq = 2457, .hw_value = 10 },
{ .center_freq = 2462, .hw_value = 11 },
{ .center_freq = 2467, .hw_value = 12 },
{ .center_freq = 2472, .hw_value = 13 },
{ .center_freq = 2484, .hw_value = 14 },
};
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
static void housekeeping_disable(struct zd_mac *mac);
static void beacon_init(struct zd_mac *mac);
static void beacon_enable(struct zd_mac *mac);
static void beacon_disable(struct zd_mac *mac);
static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
static int zd_mac_config_beacon(struct ieee80211_hw *hw,
struct sk_buff *beacon);
/*
 * zd_reg2alpha2 - map a ZD regulatory-domain code to an ISO 3166 alpha2.
 * @alpha2: output buffer of 2 chars (not NUL-terminated).
 *
 * Returns 0 and fills @alpha2 on success, 1 if the regdomain is unknown.
 *
 * Fix: the table-entry address had been corrupted by an HTML-entity
 * mangling ("®_alpha2_map[i]" — i.e. "&reg;" eaten); restored to
 * &reg_alpha2_map[i] so the function compiles and indexes correctly.
 */
static int zd_reg2alpha2(u8 regdomain, char *alpha2)
{
	unsigned int i;
	struct zd_reg_alpha2_map *reg_map;

	for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
		reg_map = &reg_alpha2_map[i];
		if (regdomain == reg_map->reg) {
			alpha2[0] = reg_map->alpha2[0];
			alpha2[1] = reg_map->alpha2[1];
			return 0;
		}
	}
	return 1;
}
/*
 * Read the permanent MAC address from the device firmware and register it
 * with mac80211.  Returns 0 or a negative chip error code.
 */
int zd_mac_preinit_hw(struct ieee80211_hw *hw)
{
	int r;
	u8 addr[ETH_ALEN];
	struct zd_mac *mac = zd_hw_mac(hw);

	r = zd_chip_read_mac_addr_fw(&mac->chip, addr);
	if (r)
		return r;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	return 0;
}
/*
 * Full hardware bring-up: initialize the chip, read the EEPROM regulatory
 * domain, force software crypto mode, and pass a regulatory hint to the
 * wireless core.  Interrupts are enabled only for the duration of the
 * setup.  Returns 0 or a negative error code.
 */
int zd_mac_init_hw(struct ieee80211_hw *hw)
{
	int r;
	struct zd_mac *mac = zd_hw_mac(hw);
	struct zd_chip *chip = &mac->chip;
	char alpha2[2];
	u8 default_regdomain;

	r = zd_chip_enable_int(chip);
	if (r)
		goto out;
	r = zd_chip_init_hw(chip);
	if (r)
		goto disable_int;

	ZD_ASSERT(!irqs_disabled());

	r = zd_read_regdomain(chip, &default_regdomain);
	if (r)
		goto disable_int;
	spin_lock_irq(&mac->lock);
	mac->regdomain = mac->default_regdomain = default_regdomain;
	spin_unlock_irq(&mac->lock);

	/* We must inform the device that we are doing encryption/decryption in
	 * software at the moment. */
	r = zd_set_encryption_type(chip, ENC_SNIFFER);
	if (r)
		goto disable_int;

	r = zd_reg2alpha2(mac->regdomain, alpha2);
	if (r)
		goto disable_int;

	r = regulatory_hint(hw->wiphy, alpha2);
disable_int:
	zd_chip_disable_int(chip);
out:
	return r;
}
/* Tear down: drain pending work, clear the chip, and wipe the mac state. */
void zd_mac_clear(struct zd_mac *mac)
{
	flush_workqueue(zd_workqueue);
	zd_chip_clear(&mac->chip);
	/* The lock must not be held while its memory is zeroed below. */
	ZD_ASSERT(!spin_is_locked(&mac->lock));
	ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
}
/*
 * Program the RX filter register: the station default, plus control
 * frames when monitor mode (pass_ctrl) is active.
 */
static int set_rx_filter(struct zd_mac *mac)
{
	unsigned long flags;
	u32 filter = STA_RX_FILTER;

	spin_lock_irqsave(&mac->lock, flags);
	if (mac->pass_ctrl)
		filter |= RX_FILTER_CTRL;
	spin_unlock_irqrestore(&mac->lock, flags);

	return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
}
/*
 * Write the interface MAC address to the chip, then either set the BSSID
 * (AP mode) or the RX filter (all other modes), mirroring the vendor
 * driver's sequence.
 *
 * NOTE(review): returns a bare -1 (not a -Exxx errno) when no vif is
 * bound; callers appear to only test for non-zero, but worth confirming.
 */
static int set_mac_and_bssid(struct zd_mac *mac)
{
	int r;

	if (!mac->vif)
		return -1;

	r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
	if (r)
		return r;

	/* Vendor driver after setting MAC either sets BSSID for AP or
	 * filter for other modes.
	 */
	if (mac->type != NL80211_IFTYPE_AP)
		return set_rx_filter(mac);
	else
		return zd_write_bssid(&mac->chip, mac->vif->addr);
}
/* Reset the hardware multicast hash to an empty (all-reject) filter. */
static int set_mc_hash(struct zd_mac *mac)
{
	struct zd_mc_hash hash;

	zd_mc_clear(&hash);
	return zd_chip_set_multicast_hash(&mac->chip, &hash);
}
/*
 * mac80211 start callback: (lazily) load firmware, power up the radio,
 * enable RX/TX and hardware interrupts, then start the housekeeping and
 * beacon machinery.  Each failure step unwinds everything enabled before
 * it via the label chain.  Returns 0 or a negative error code.
 */
int zd_op_start(struct ieee80211_hw *hw)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	struct zd_chip *chip = &mac->chip;
	struct zd_usb *usb = &chip->usb;
	int r;

	if (!usb->initialized) {
		r = zd_usb_init_hw(usb);
		if (r)
			goto out;
	}

	r = zd_chip_enable_int(chip);
	if (r < 0)
		goto out;

	r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G);
	if (r < 0)
		goto disable_int;
	r = set_rx_filter(mac);
	if (r)
		goto disable_int;
	r = set_mc_hash(mac);
	if (r)
		goto disable_int;
	r = zd_chip_switch_radio_on(chip);
	if (r < 0)
		goto disable_int;
	r = zd_chip_enable_rxtx(chip);
	if (r < 0)
		goto disable_radio;
	r = zd_chip_enable_hwint(chip);
	if (r < 0)
		goto disable_rxtx;

	housekeeping_enable(mac);
	beacon_enable(mac);
	set_bit(ZD_DEVICE_RUNNING, &mac->flags);
	return 0;
disable_rxtx:
	zd_chip_disable_rxtx(chip);
disable_radio:
	zd_chip_switch_radio_off(chip);
disable_int:
	zd_chip_disable_int(chip);
out:
	return r;
}
/* zd_op_stop - mac80211 stop callback.
 *
 * Shuts the device down and releases any TX frames still queued
 * waiting for a hardware ACK.
 */
void zd_op_stop(struct ieee80211_hw *hw)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	struct zd_chip *chip = &mac->chip;
	struct sk_buff *skb;
	struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;

	clear_bit(ZD_DEVICE_RUNNING, &mac->flags);

	/* The order here deliberately is a little different from the open()
	 * method, since we need to make sure there is no opportunity for RX
	 * frames to be processed by mac80211 after we have stopped it.
	 */
	zd_chip_disable_rxtx(chip);
	beacon_disable(mac);
	housekeeping_disable(mac);
	flush_workqueue(zd_workqueue);

	zd_chip_disable_hwint(chip);
	zd_chip_switch_radio_off(chip);
	zd_chip_disable_int(chip);

	/* Drop frames still awaiting an ACK; nobody will report them now. */
	while ((skb = skb_dequeue(ack_wait_queue)))
		dev_kfree_skb_any(skb);
}
/* zd_restore_settings - re-program the device from cached software state.
 *
 * Takes a consistent snapshot of the MAC configuration under the lock
 * (multicast hash, preamble, beacon parameters, channel) and writes it
 * back to the hardware; used after the device has been reset.  For
 * beaconing interface types the current beacon is re-uploaded as well.
 *
 * Returns 0 on success or a negative error code from the chip helpers.
 */
int zd_restore_settings(struct zd_mac *mac)
{
	struct sk_buff *beacon;
	struct zd_mc_hash multicast_hash;
	unsigned int short_preamble;
	int r, beacon_interval, beacon_period;
	u8 channel;

	dev_dbg_f(zd_mac_dev(mac), "\n");

	/* Snapshot everything under the lock, program hardware outside it. */
	spin_lock_irq(&mac->lock);
	multicast_hash = mac->multicast_hash;
	short_preamble = mac->short_preamble;
	beacon_interval = mac->beacon.interval;
	beacon_period = mac->beacon.period;
	channel = mac->channel;
	spin_unlock_irq(&mac->lock);

	r = set_mac_and_bssid(mac);
	if (r < 0) {
		dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
		return r;
	}

	r = zd_chip_set_channel(&mac->chip, channel);
	if (r < 0) {
		dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
			  r);
		return r;
	}

	set_rts_cts(mac, short_preamble);

	r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
	if (r < 0) {
		dev_dbg_f(zd_mac_dev(mac),
			  "zd_chip_set_multicast_hash failed, %d\n", r);
		return r;
	}

	if (mac->type == NL80211_IFTYPE_MESH_POINT ||
	    mac->type == NL80211_IFTYPE_ADHOC ||
	    mac->type == NL80211_IFTYPE_AP) {
		if (mac->vif != NULL) {
			beacon = ieee80211_beacon_get(mac->hw, mac->vif);
			if (beacon) {
				zd_mac_config_beacon(mac->hw, beacon);
				kfree_skb(beacon);
			}
		}

		zd_set_beacon_interval(&mac->chip, beacon_interval,
				       beacon_period, mac->type);

		spin_lock_irq(&mac->lock);
		mac->beacon.last_update = jiffies;
		spin_unlock_irq(&mac->lock);
	}

	return 0;
}
/**
 * zd_mac_tx_status - reports tx status of a packet if required
 * @hw: a &struct ieee80211_hw pointer
 * @skb: a sk-buffer
 * @ackssi: ACK signal strength
 * @tx_status: hardware TX status report, or NULL when none is available
 *             (in which case the frame is treated as a success with one
 *             transmission attempt)
 *
 * This information calls ieee80211_tx_status_irqsafe() if required by the
 * control information. It copies the control information into the status
 * information.
 *
 * If no status information has been requested, the skb is freed.
 */
static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int ackssi, struct tx_status *tx_status)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int i;
	int success = 1, retry = 1;
	int first_idx;
	const struct tx_retry_rate *retries;

	ieee80211_tx_info_clear_status(info);

	if (tx_status) {
		success = !tx_status->failure;
		/* retry counts the total attempts incl. the final one. */
		retry = tx_status->retry + success;
	}

	if (success) {
		/* success */
		info->flags |= IEEE80211_TX_STAT_ACK;
	} else {
		/* failure */
		info->flags &= ~IEEE80211_TX_STAT_ACK;
	}

	/* Reconstruct the per-attempt rate table from the driver's static
	 * retry-rate schedule, starting at the first requested rate. */
	first_idx = info->status.rates[0].idx;
	ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
	retries = &zd_retry_rates[first_idx];
	ZD_ASSERT(1 <= retry && retry <= retries->count);

	info->status.rates[0].idx = retries->rate[0];
	info->status.rates[0].count = 1; // (retry > 1 ? 2 : 1);

	for (i=1; i<IEEE80211_TX_MAX_RATES-1 && i<retry; i++) {
		info->status.rates[i].idx = retries->rate[i];
		info->status.rates[i].count = 1; // ((i==retry-1) && success ? 1:2);
	}
	for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
		info->status.rates[i].idx = retries->rate[retry - 1];
		info->status.rates[i].count = 1; // (success ? 1:2);
	}
	if (i<IEEE80211_TX_MAX_RATES)
		info->status.rates[i].idx = -1; /* terminate */

	info->status.ack_signal = ackssi;
	ieee80211_tx_status_irqsafe(hw, skb);
}
/**
 * zd_mac_tx_failed - callback for failed frames
 * @urb: the USB request block whose transfer buffer holds the hardware
 *       &struct tx_status report
 *
 * This function is called if a frame couldn't be successfully
 * transferred. The frame in the tx queue matching the reported
 * destination and final rate will be selected and reported as error to
 * the upper layers; earlier frames on the queue are flushed as well.
 */
void zd_mac_tx_failed(struct urb *urb)
{
	struct ieee80211_hw * hw = zd_usb_to_hw(urb->context);
	struct zd_mac *mac = zd_hw_mac(hw);
	struct sk_buff_head *q = &mac->ack_wait_queue;
	struct sk_buff *skb;
	struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
	unsigned long flags;
	int success = !tx_status->failure;
	int retry = tx_status->retry + success;
	int found = 0;
	int i, position = 0;

	q = &mac->ack_wait_queue;
	spin_lock_irqsave(&q->lock, flags);

	skb_queue_walk(q, skb) {
		struct ieee80211_hdr *tx_hdr;
		struct ieee80211_tx_info *info;
		int first_idx, final_idx;
		const struct tx_retry_rate *retries;
		u8 final_rate;

		position ++;

		/* if the hardware reports a failure and we had a 802.11 ACK
		 * pending, then we skip the first skb when searching for a
		 * matching frame */
		if (tx_status->failure && mac->ack_pending &&
		    skb_queue_is_first(q, skb)) {
			continue;
		}

		tx_hdr = (struct ieee80211_hdr *)skb->data;

		/* we skip all frames not matching the reported destination */
		if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
			continue;
		}

		/* we skip all frames not matching the reported final rate */
		info = IEEE80211_SKB_CB(skb);
		first_idx = info->status.rates[0].idx;
		ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
		retries = &zd_retry_rates[first_idx];
		if (retry <= 0 || retry > retries->count)
			continue;

		final_idx = retries->rate[retry - 1];
		final_rate = zd_rates[final_idx].hw_value;

		if (final_rate != tx_status->rate) {
			continue;
		}

		found = 1;
		break;
	}

	if (found) {
		/* Flush everything up to and including the matched frame;
		 * only the matched one carries the hardware status. */
		for (i=1; i<=position; i++) {
			skb = __skb_dequeue(q);
			zd_mac_tx_status(hw, skb,
					 mac->ack_pending ? mac->ack_signal : 0,
					 i == position ? tx_status : NULL);
			mac->ack_pending = 0;
		}
	}

	spin_unlock_irqrestore(&q->lock, flags);
}
/**
 * zd_mac_tx_to_dev - callback for USB layer
 * @skb: a &sk_buff pointer
 * @error: error value, 0 if transmission successful
 *
 * Informs the MAC layer that the frame has successfully transferred to the
 * device. If an ACK is required and the transfer to the device has been
 * successful, the packets are put on the @ack_wait_queue with
 * the control set removed.
 */
void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = info->rate_driver_data[0];
	struct zd_mac *mac = zd_hw_mac(hw);

	ieee80211_tx_info_clear_status(info);

	/* Strip the driver control set prepended by fill_ctrlset(). */
	skb_pull(skb, sizeof(struct zd_ctrlset));
	if (unlikely(error ||
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
		/*
		 * FIXME : do we need to fill in anything ?
		 */
		ieee80211_tx_status_irqsafe(hw, skb);
	} else {
		struct sk_buff_head *q = &mac->ack_wait_queue;

		skb_queue_tail(q, skb);
		/* Keep the ACK-wait queue bounded; report the oldest
		 * entries as best-effort successes when it overflows. */
		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
			zd_mac_tx_status(hw, skb_dequeue(q),
					 mac->ack_pending ? mac->ack_signal : 0,
					 NULL);
			mac->ack_pending = 0;
		}
	}
}
/* zd_calc_tx_length_us - compute the on-air duration of a frame.
 *
 * @service: PLCP service field to adjust for the 11M length-extension
 *           bit; may be NULL when the caller does not need it.
 * @zd_rate: zd-rate value (modulation flag plus rate index).
 * @tx_length: frame length in bytes.
 *
 * Returns the transmit duration in microseconds, or -EINVAL for an
 * unknown rate.  The divisor table maps each rate to its Mbit/s value
 * (5.5M is handled by doubling the bit count, hence divisor 11).
 */
static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length)
{
	/* ZD_PURE_RATE() must be used to remove the modulation type flag of
	 * the zd-rate values.
	 */
	static const u8 rate_divisor[] = {
		[ZD_PURE_RATE(ZD_CCK_RATE_1M)]   =  1,
		[ZD_PURE_RATE(ZD_CCK_RATE_2M)]   =  2,
		/* Bits must be doubled. */
		[ZD_PURE_RATE(ZD_CCK_RATE_5_5M)] = 11,
		[ZD_PURE_RATE(ZD_CCK_RATE_11M)]  = 11,
		[ZD_PURE_RATE(ZD_OFDM_RATE_6M)]  =  6,
		[ZD_PURE_RATE(ZD_OFDM_RATE_9M)]  =  9,
		[ZD_PURE_RATE(ZD_OFDM_RATE_12M)] = 12,
		[ZD_PURE_RATE(ZD_OFDM_RATE_18M)] = 18,
		[ZD_PURE_RATE(ZD_OFDM_RATE_24M)] = 24,
		[ZD_PURE_RATE(ZD_OFDM_RATE_36M)] = 36,
		[ZD_PURE_RATE(ZD_OFDM_RATE_48M)] = 48,
		[ZD_PURE_RATE(ZD_OFDM_RATE_54M)] = 54,
	};

	u32 bits = (u32)tx_length * 8;
	u32 divisor;

	divisor = rate_divisor[ZD_PURE_RATE(zd_rate)];
	if (divisor == 0)
		return -EINVAL;

	switch (zd_rate) {
	case ZD_CCK_RATE_5_5M:
		bits = (2*bits) + 10; /* round up to the next integer */
		break;
	case ZD_CCK_RATE_11M:
		/* At 11M the PLCP service field carries a length-extension
		 * bit for frame lengths that don't divide evenly. */
		if (service) {
			u32 t = bits % 11;
			*service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION;
			if (0 < t && t <= 3) {
				*service |= ZD_PLCP_SERVICE_LENGTH_EXTENSION;
			}
		}
		bits += 10; /* round up to the next integer */
		break;
	}

	return bits/divisor;
}
/* cs_set_control - fill the control byte of the zd control set.
 *
 * Derives the per-frame flags (backoff, no-ACK, PS-poll, RTS/CTS
 * protection) from the mac80211 TX info and frame header.
 */
static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
			   struct ieee80211_hdr *header,
			   struct ieee80211_tx_info *info)
{
	/*
	 * CONTROL TODO:
	 * - if backoff needed, enable bit 0
	 * - if burst (backoff not needed) disable bit 0
	 */
	cs->control = 0;

	/* First fragment */
	if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
		cs->control |= ZD_CS_NEED_RANDOM_BACKOFF;

	/* No ACK expected (multicast, etc.) */
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		cs->control |= ZD_CS_NO_ACK;

	/* PS-POLL */
	if (ieee80211_is_pspoll(header->frame_control))
		cs->control |= ZD_CS_PS_POLL_FRAME;

	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		cs->control |= ZD_CS_RTS;

	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		cs->control |= ZD_CS_SELF_CTS;

	/* FIXME: Management frame? */
}
/* zd_mac_config_beacon - upload a beacon frame into the device FIFO.
 *
 * Acquires the device's beacon-FIFO semaphore (polling with a timeout),
 * writes the length and beacon bytes plus a 4-byte CRC placeholder in a
 * single batched register-write transaction, then releases the
 * semaphore and programs the PLCP configuration.  If the semaphore
 * never becomes available the device is queued for a USB reset, since
 * firmware may otherwise freeze later.
 *
 * Returns 0 on success or a negative error code.
 */
static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	int r, ret, num_cmds, req_pos = 0;
	u32 tmp, j = 0;
	/* 4 more bytes for tail CRC */
	u32 full_len = beacon->len + 4;
	unsigned long end_jiffies, message_jiffies;
	struct zd_ioreq32 *ioreqs;

	/* Alloc memory for full beacon write at once. */
	num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
	ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
	if (!ioreqs)
		return -ENOMEM;

	mutex_lock(&mac->chip.mutex);

	/* Grab the beacon FIFO semaphore; bit 1 set means busy. */
	r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
	if (r < 0)
		goto out;
	r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
	if (r < 0)
		goto release_sema;

	end_jiffies = jiffies + HZ / 2; /*~500ms*/
	message_jiffies = jiffies + HZ / 10; /*~100ms*/
	while (tmp & 0x2) {
		r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
		if (r < 0)
			goto release_sema;
		if (time_is_before_eq_jiffies(message_jiffies)) {
			message_jiffies = jiffies + HZ / 10;
			dev_err(zd_mac_dev(mac),
				"CR_BCN_FIFO_SEMAPHORE not ready\n");
			if (time_is_before_eq_jiffies(end_jiffies)) {
				dev_err(zd_mac_dev(mac),
					"Giving up beacon config.\n");
				r = -ETIMEDOUT;
				goto reset_device;
			}
		}
		msleep(20);
	}

	/* First command: beacon length (ZD1211B needs it twice). */
	ioreqs[req_pos].addr = CR_BCN_FIFO;
	ioreqs[req_pos].value = full_len - 1;
	req_pos++;
	if (zd_chip_is_zd1211b(&mac->chip)) {
		ioreqs[req_pos].addr = CR_BCN_LENGTH;
		ioreqs[req_pos].value = full_len - 1;
		req_pos++;
	}

	/* One register write per beacon byte... */
	for (j = 0 ; j < beacon->len; j++) {
		ioreqs[req_pos].addr = CR_BCN_FIFO;
		ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
		req_pos++;
	}

	/* ...plus four zero bytes where hardware places the CRC. */
	for (j = 0; j < 4; j++) {
		ioreqs[req_pos].addr = CR_BCN_FIFO;
		ioreqs[req_pos].value = 0x0;
		req_pos++;
	}

	BUG_ON(req_pos != num_cmds);

	r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);

release_sema:
	/*
	 * Try very hard to release device beacon semaphore, as otherwise
	 * device/driver can be left in unusable state.
	 */
	end_jiffies = jiffies + HZ / 2; /*~500ms*/
	ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
	while (ret < 0) {
		if (time_is_before_eq_jiffies(end_jiffies)) {
			ret = -ETIMEDOUT;
			break;
		}
		msleep(20);
		ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
	}

	if (ret < 0)
		dev_err(zd_mac_dev(mac), "Could not release "
				"CR_BCN_FIFO_SEMAPHORE!\n");
	if (r < 0 || ret < 0) {
		if (r >= 0)
			r = ret;
		goto out;
	}

	/* 802.11b/g 2.4G CCK 1Mb
	 * 802.11a, not yet implemented, uses different values (see GPL vendor
	 * driver)
	 */
	r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
				CR_BCN_PLCP_CFG);
out:
	mutex_unlock(&mac->chip.mutex);
	kfree(ioreqs);
	return r;

reset_device:
	mutex_unlock(&mac->chip.mutex);
	kfree(ioreqs);

	/* semaphore stuck, reset device to avoid fw freeze later */
	dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
			"reseting device...");
	usb_queue_reset_device(mac->chip.usb.intf);
	return r;
}
/* fill_ctrlset - prepend and fill the device control set for a TX frame.
 *
 * Pushes a zd_ctrlset header onto @skb and fills in modulation, frame
 * and packet lengths, control flags, and the computed airtime.
 *
 * Returns 0 on success, or a negative error from
 * zd_calc_tx_length_us() for an unknown rate.
 */
static int fill_ctrlset(struct zd_mac *mac,
			struct sk_buff *skb)
{
	int r;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned int frag_len = skb->len + FCS_LEN;
	unsigned int packet_length;
	struct ieee80211_rate *txrate;
	struct zd_ctrlset *cs = (struct zd_ctrlset *)
		skb_push(skb, sizeof(struct zd_ctrlset));
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	ZD_ASSERT(frag_len <= 0xffff);

	txrate = ieee80211_get_tx_rate(mac->hw, info);

	cs->modulation = txrate->hw_value;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		cs->modulation = txrate->hw_value_short;

	cs->tx_length = cpu_to_le16(frag_len);

	cs_set_control(mac, cs, hdr, info);

	packet_length = frag_len + sizeof(struct zd_ctrlset) + 10;
	ZD_ASSERT(packet_length <= 0xffff);
	/* ZD1211B: Computing the length difference this way, gives us
	 * flexibility to compute the packet length.
	 */
	cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ?
			packet_length - frag_len : packet_length);

	/*
	 * CURRENT LENGTH:
	 * - transmit frame length in microseconds
	 * - seems to be derived from frame length
	 * - see Cal_Us_Service() in zdinlinef.h
	 * - if macp->bTxBurstEnable is enabled, then multiply by 4
	 *  - bTxBurstEnable is never set in the vendor driver
	 *
	 * SERVICE:
	 * - "for PLCP configuration"
	 * - always 0 except in some situations at 802.11b 11M
	 * - see line 53 of zdinlinef.h
	 */
	cs->service = 0;
	r = zd_calc_tx_length_us(&cs->service, ZD_RATE(cs->modulation),
		                 le16_to_cpu(cs->tx_length));
	if (r < 0)
		return r;
	cs->current_length = cpu_to_le16(r);
	cs->next_frame_length = 0;

	return 0;
}
/**
 * zd_op_tx - transmits a network frame to the device
 * @hw: mac80211 hardware device
 * @skb: socket buffer
 *
 * This function transmit an IEEE 802.11 network frame to the device. The
 * control block of the skbuff will be initialized. If necessary the incoming
 * mac80211 queues will be stopped.
 */
static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int r;

	r = fill_ctrlset(mac, skb);
	if (r)
		goto fail;

	/* Stash hw so the USB completion path can find its way back. */
	info->rate_driver_data[0] = hw;

	r = zd_usb_tx(&mac->chip.usb, skb);
	if (r)
		goto fail;
	return;

fail:
	/* mac80211 TX may not fail; drop the frame silently instead. */
	dev_kfree_skb(skb);
}
/**
 * filter_ack - filters incoming packets for acknowledgements
 * @hw: the mac80211 hardware device
 * @rx_hdr: received header
 * @stats: the status for the received packet
 *
 * This functions looks for ACK packets and tries to match them with the
 * frames in the tx queue. If a match is found the frame will be dequeued and
 * the upper layers is informed about the successful transmission. If
 * mac80211 queues have been stopped and the number of frames still to be
 * transmitted is low the queues will be opened again.
 *
 * Returns 1 if the frame was an ACK, 0 if it was ignored.
 */
static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
		      struct ieee80211_rx_status *stats)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	struct sk_buff *skb;
	struct sk_buff_head *q;
	unsigned long flags;
	int found = 0;
	int i, position = 0;

	if (!ieee80211_is_ack(rx_hdr->frame_control))
		return 0;

	q = &mac->ack_wait_queue;
	spin_lock_irqsave(&q->lock, flags);
	/* Find the queued TX frame whose source matches the ACK's
	 * destination; the first entry is skipped while an earlier ACK
	 * is still pending for it. */
	skb_queue_walk(q, skb) {
		struct ieee80211_hdr *tx_hdr;

		position ++;

		if (mac->ack_pending && skb_queue_is_first(q, skb))
			continue;

		tx_hdr = (struct ieee80211_hdr *)skb->data;
		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
		{
			found = 1;
			break;
		}
	}

	if (found) {
		/* Flush the frames ahead of the match; the matched frame
		 * itself stays queued with ack_pending set, so its status
		 * can carry the ACK signal strength later. */
		for (i=1; i<position; i++) {
			skb = __skb_dequeue(q);
			zd_mac_tx_status(hw, skb,
					 mac->ack_pending ? mac->ack_signal : 0,
					 NULL);
			mac->ack_pending = 0;
		}

		mac->ack_pending = 1;
		mac->ack_signal = stats->signal;

		/* Prevent pending tx-packet on AP-mode */
		if (mac->type == NL80211_IFTYPE_AP) {
			skb = __skb_dequeue(q);
			zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
			mac->ack_pending = 0;
		}
	}

	spin_unlock_irqrestore(&q->lock, flags);
	return 1;
}
/* zd_mac_rx - process a frame received from the device.
 *
 * @buffer holds the PLCP header, the 802.11 frame and a trailing
 * rx_status block appended by the hardware.  Validates the length,
 * builds the mac80211 RX status (frequency, signal, rate index),
 * strips the PLCP header and status trailer, filters ACKs for internal
 * TX tracking, and hands everything else to mac80211.
 *
 * Returns 0 on success or a negative error code (-EINVAL for short or
 * corrupt frames, -ENOMEM on allocation failure).
 */
int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	struct ieee80211_rx_status stats;
	const struct rx_status *status;
	struct sk_buff *skb;
	int bad_frame = 0;
	__le16 fc;
	int need_padding;
	int i;
	u8 rate;

	if (length < ZD_PLCP_HEADER_SIZE + 10 /* IEEE80211_1ADDR_LEN */ +
	             FCS_LEN + sizeof(struct rx_status))
		return -EINVAL;

	memset(&stats, 0, sizeof(stats));

	/* Note about pass_failed_fcs and pass_ctrl access below:
	 * mac locking intentionally omitted here, as this is the only unlocked
	 * reader and the only writer is configure_filter. Plus, if there were
	 * any races accessing these variables, it wouldn't really matter.
	 * If mac80211 ever provides a way for us to access filter flags
	 * from outside configure_filter, we could improve on this. Also, this
	 * situation may change once we implement some kind of DMA-into-skb
	 * RX path. */

	/* Caller has to ensure that length >= sizeof(struct rx_status). */
	status = (struct rx_status *)
		(buffer + (length - sizeof(struct rx_status)));
	if (status->frame_status & ZD_RX_ERROR) {
		if (mac->pass_failed_fcs &&
				(status->frame_status & ZD_RX_CRC32_ERROR)) {
			stats.flag |= RX_FLAG_FAILED_FCS_CRC;
			bad_frame = 1;
		} else {
			return -EINVAL;
		}
	}

	stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
	stats.band = IEEE80211_BAND_2GHZ;
	stats.signal = status->signal_strength;

	rate = zd_rx_rate(buffer, status);

	/* todo: return index in the big switches in zd_rx_rate instead */
	for (i = 0; i < mac->band.n_bitrates; i++)
		if (rate == mac->band.bitrates[i].hw_value)
			stats.rate_idx = i;

	length -= ZD_PLCP_HEADER_SIZE + sizeof(struct rx_status);
	buffer += ZD_PLCP_HEADER_SIZE;

	/* Except for bad frames, filter each frame to see if it is an ACK, in
	 * which case our internal TX tracking is updated. Normally we then
	 * bail here as there's no need to pass ACKs on up to the stack, but
	 * there is also the case where the stack has requested us to pass
	 * control frames on up (pass_ctrl) which we must consider. */
	if (!bad_frame &&
			filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats)
			&& !mac->pass_ctrl)
		return 0;

	/* QoS data frames without a 4th address need 2 bytes of padding so
	 * the payload stays 4-byte aligned after the 26-byte header. */
	fc = get_unaligned((__le16*)buffer);
	need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);

	skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
	if (skb == NULL)
		return -ENOMEM;
	if (need_padding) {
		/* Make sure the payload data is 4 byte aligned. */
		skb_reserve(skb, 2);
	}

	/* FIXME : could we avoid this big memcpy ? */
	memcpy(skb_put(skb, length), buffer, length);

	memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
	ieee80211_rx_irqsafe(hw, skb);
	return 0;
}
/* zd_op_add_interface - mac80211 add_interface callback.
 *
 * Only a single interface is supported; any second interface, or an
 * unsupported type, is rejected with -EOPNOTSUPP.  On success the MAC
 * address and BSSID/filter are programmed immediately.
 */
static int zd_op_add_interface(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	struct zd_mac *mac = zd_hw_mac(hw);

	/* using NL80211_IFTYPE_UNSPECIFIED to indicate no mode selected */
	if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
		return -EOPNOTSUPP;

	switch (vif->type) {
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mac->type = vif->type;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mac->vif = vif;

	return set_mac_and_bssid(mac);
}
/* zd_op_remove_interface - mac80211 remove_interface callback.
 *
 * Clears the interface bookkeeping, stops beaconing and wipes the
 * device MAC address.
 */
static void zd_op_remove_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	mac->type = NL80211_IFTYPE_UNSPECIFIED;
	mac->vif = NULL;
	zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
	zd_write_mac_addr(&mac->chip, NULL);
}
/* mac80211 config callback: cache the new channel under the lock and
 * program it into the chip. */
static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	u8 new_channel = hw->conf.channel->hw_value;

	spin_lock_irq(&mac->lock);
	mac->channel = new_channel;
	spin_unlock_irq(&mac->lock);

	return zd_chip_set_channel(&mac->chip, new_channel);
}
/* zd_beacon_done - housekeeping after the hardware sent a beacon.
 *
 * Runs from the interrupt work item in AP mode: flushes buffered
 * broadcast/multicast frames for power-saving stations, then fetches
 * and uploads the next beacon so the TIM element stays current.
 */
static void zd_beacon_done(struct zd_mac *mac)
{
	struct sk_buff *skb, *beacon;

	if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
		return;
	if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
		return;

	/*
	 * Send out buffered broad- and multicast frames.
	 */
	while (!ieee80211_queue_stopped(mac->hw, 0)) {
		skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
		if (!skb)
			break;
		zd_op_tx(mac->hw, skb);
	}

	/*
	 * Fetch next beacon so that tim_count is updated.
	 */
	beacon = ieee80211_beacon_get(mac->hw, mac->vif);
	if (beacon) {
		zd_mac_config_beacon(mac->hw, beacon);
		kfree_skb(beacon);
	}

	spin_lock_irq(&mac->lock);
	mac->beacon.last_update = jiffies;
	spin_unlock_irq(&mac->lock);
}
/* zd_process_intr - workqueue handler for device interrupts.
 *
 * Reads the interrupt status snapshot stored in mac->intr_buffer by the
 * USB interrupt completion, dispatches beacon-done handling, and then
 * re-enables hardware interrupts.
 */
static void zd_process_intr(struct work_struct *work)
{
	u16 int_status;
	unsigned long flags;
	struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);

	/* The status word lives at offset 4 of the interrupt buffer. */
	spin_lock_irqsave(&mac->lock, flags);
	int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
	spin_unlock_irqrestore(&mac->lock, flags);

	if (int_status & INT_CFG_NEXT_BCN) {
		/*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
		zd_beacon_done(mac);
	} else {
		dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
	}

	zd_chip_enable_hwint(&mac->chip);
}
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_mc_hash hash;
struct netdev_hw_addr *ha;
zd_mc_clear(&hash);
netdev_hw_addr_list_for_each(ha, mc_list) {
dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr);
zd_mc_add_addr(&hash, ha->addr);
}
return hash.low | ((u64)hash.high << 32);
}
/* Filter flags this hardware/driver combination can honour. */
#define SUPPORTED_FIF_FLAGS \
	(FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
	FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)

/* zd_op_configure_filter - mac80211 configure_filter callback.
 *
 * Applies the requested RX filter flags and the multicast hash
 * (precomputed by zd_op_prepare_multicast and passed in @multicast).
 */
static void zd_op_configure_filter(struct ieee80211_hw *hw,
			unsigned int changed_flags,
			unsigned int *new_flags,
			u64 multicast)
{
	/* Unpack the 64-bit hash produced by prepare_multicast. */
	struct zd_mc_hash hash = {
		.low = multicast,
		.high = multicast >> 32,
	};
	struct zd_mac *mac = zd_hw_mac(hw);
	unsigned long flags;
	int r;

	/* Only deal with supported flags */
	changed_flags &= SUPPORTED_FIF_FLAGS;
	*new_flags &= SUPPORTED_FIF_FLAGS;

	/*
	 * If multicast parameter (as returned by zd_op_prepare_multicast)
	 * has changed, no bit in changed_flags is set. To handle this
	 * situation, we do not return if changed_flags is 0. If we do so,
	 * we will have some issue with IPv6 which uses multicast for link
	 * layer address resolution.
	 */
	if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
		zd_mc_add_all(&hash);

	spin_lock_irqsave(&mac->lock, flags);
	mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL);
	mac->pass_ctrl = !!(*new_flags & FIF_CONTROL);
	mac->multicast_hash = hash;
	spin_unlock_irqrestore(&mac->lock, flags);

	zd_chip_set_multicast_hash(&mac->chip, &hash);

	if (changed_flags & FIF_CONTROL) {
		r = set_rx_filter(mac);
		if (r)
			dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
	}

	/* no handling required for FIF_OTHER_BSS as we don't currently
	 * do BSSID filtering */
	/* FIXME: in future it would be nice to enable the probe response
	 * filter (so that the driver doesn't see them) until
	 * FIF_BCN_PRBRESP_PROMISC is set. however due to atomicity here, we'd
	 * have to schedule work to enable prbresp reception, which might
	 * happen too late. For now we'll just listen and forward them all the
	 * time. */
}
/* set_rts_cts - program the RTS/CTS rate under the chip mutex.
 *
 * @short_preamble selects the short-preamble variant of the rate.
 */
static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
{
	mutex_lock(&mac->chip.mutex);
	zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
	mutex_unlock(&mac->chip.mutex);
}
/* zd_op_bss_info_changed - mac80211 bss_info_changed callback.
 *
 * Handles beacon (re)configuration and beacon-interval changes for
 * beaconing interface types, tracks association state, and updates the
 * ERP short-preamble setting.
 */
static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *bss_conf,
				   u32 changes)
{
	struct zd_mac *mac = zd_hw_mac(hw);
	int associated;

	dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);

	if (mac->type == NL80211_IFTYPE_MESH_POINT ||
	    mac->type == NL80211_IFTYPE_ADHOC ||
	    mac->type == NL80211_IFTYPE_AP) {
		/* Beaconing modes count as "associated" for LED purposes. */
		associated = true;
		if (changes & BSS_CHANGED_BEACON) {
			struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);

			if (beacon) {
				/* Hardware interrupts are held off while the
				 * beacon FIFO is being rewritten. */
				zd_chip_disable_hwint(&mac->chip);
				zd_mac_config_beacon(hw, beacon);
				zd_chip_enable_hwint(&mac->chip);
				kfree_skb(beacon);
			}
		}

		if (changes & BSS_CHANGED_BEACON_ENABLED) {
			u16 interval = 0;
			u8 period = 0;

			if (bss_conf->enable_beacon) {
				period = bss_conf->dtim_period;
				interval = bss_conf->beacon_int;
			}

			spin_lock_irq(&mac->lock);
			mac->beacon.period = period;
			mac->beacon.interval = interval;
			mac->beacon.last_update = jiffies;
			spin_unlock_irq(&mac->lock);

			zd_set_beacon_interval(&mac->chip, interval, period,
					       mac->type);
		}
	} else
		associated = is_valid_ether_addr(bss_conf->bssid);

	spin_lock_irq(&mac->lock);
	mac->associated = associated;
	spin_unlock_irq(&mac->lock);

	/* TODO: do hardware bssid filtering */

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		spin_lock_irq(&mac->lock);
		mac->short_preamble = bss_conf->use_short_preamble;
		spin_unlock_irq(&mac->lock);

		set_rts_cts(mac, bss_conf->use_short_preamble);
	}
}
static u64 zd_op_get_tsf(struct ieee80211_hw *hw)
{
struct zd_mac *mac = zd_hw_mac(hw);
return zd_chip_get_tsf(&mac->chip);
}
/* mac80211 callback table for the zd1211rw driver. */
static const struct ieee80211_ops zd_ops = {
	.tx			= zd_op_tx,
	.start			= zd_op_start,
	.stop			= zd_op_stop,
	.add_interface		= zd_op_add_interface,
	.remove_interface	= zd_op_remove_interface,
	.config			= zd_op_config,
	.prepare_multicast	= zd_op_prepare_multicast,
	.configure_filter	= zd_op_configure_filter,
	.bss_info_changed	= zd_op_bss_info_changed,
	.get_tsf		= zd_op_get_tsf,
};
/* zd_mac_alloc_hw - allocate and initialize the mac80211 hw structure.
 *
 * Allocates the ieee80211_hw with embedded zd_mac private data, fills
 * in the supported band, capability flags, interface modes and rate
 * retry limits, and wires up the chip, housekeeping, beacon and
 * interrupt-work state.
 *
 * Returns the new hw on success or NULL on allocation failure.
 */
struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
{
	struct zd_mac *mac;
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct zd_mac), &zd_ops);
	if (!hw) {
		dev_dbg_f(&intf->dev, "out of memory\n");
		return NULL;
	}

	mac = zd_hw_mac(hw);

	memset(mac, 0, sizeof(*mac));
	spin_lock_init(&mac->lock);
	mac->hw = hw;

	mac->type = NL80211_IFTYPE_UNSPECIFIED;

	/* Per-device copies of the static channel/rate tables; mac80211
	 * may modify flags on these. */
	memcpy(mac->channels, zd_channels, sizeof(zd_channels));
	memcpy(mac->rates, zd_rates, sizeof(zd_rates));
	mac->band.n_bitrates = ARRAY_SIZE(zd_rates);
	mac->band.bitrates = mac->rates;
	mac->band.n_channels = ARRAY_SIZE(zd_channels);
	mac->band.channels = mac->channels;

	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		    IEEE80211_HW_SIGNAL_UNSPEC |
		    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_MESH_POINT) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_AP);

	hw->max_signal = 100;
	hw->queues = 1;
	hw->extra_tx_headroom = sizeof(struct zd_ctrlset);

	/*
	 * Tell mac80211 that we support multi rate retries
	 */
	hw->max_rates = IEEE80211_TX_MAX_RATES;
	hw->max_rate_tries = 18;	/* 9 rates * 2 retries/rate */

	skb_queue_head_init(&mac->ack_wait_queue);
	mac->ack_pending = 0;

	zd_chip_init(&mac->chip, hw, intf);
	housekeeping_init(mac);
	beacon_init(mac);
	INIT_WORK(&mac->process_intr, zd_process_intr);

	SET_IEEE80211_DEV(hw, &intf->dev);
	return hw;
}
/* Re-check the beacon watchdog roughly once per second. */
#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)

/* beacon_watchdog_handler - periodic check that beaconing is alive.
 *
 * In AP mode the beacon-done interrupt updates beacon.last_update; if
 * no update arrived for more than interval + 1s the beacon engine is
 * considered stalled and gets reconfigured.  The work always re-arms
 * itself.
 */
static void beacon_watchdog_handler(struct work_struct *work)
{
	struct zd_mac *mac =
		container_of(work, struct zd_mac, beacon.watchdog_work.work);
	struct sk_buff *beacon;
	unsigned long timeout;
	int interval, period;

	if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
		goto rearm;
	if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
		goto rearm;

	spin_lock_irq(&mac->lock);
	interval = mac->beacon.interval;
	period = mac->beacon.period;
	timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
	spin_unlock_irq(&mac->lock);

	if (interval > 0 && time_is_before_jiffies(timeout)) {
		dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
					   "restarting. "
					   "(interval: %d, dtim: %d)\n",
					   interval, period);

		zd_chip_disable_hwint(&mac->chip);

		beacon = ieee80211_beacon_get(mac->hw, mac->vif);
		if (beacon) {
			zd_mac_config_beacon(mac->hw, beacon);
			kfree_skb(beacon);
		}

		zd_set_beacon_interval(&mac->chip, interval, period, mac->type);

		zd_chip_enable_hwint(&mac->chip);

		spin_lock_irq(&mac->lock);
		mac->beacon.last_update = jiffies;
		spin_unlock_irq(&mac->lock);
	}

rearm:
	queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
			   BEACON_WATCHDOG_DELAY);
}
/* Initialize the beacon watchdog delayed work (not yet queued). */
static void beacon_init(struct zd_mac *mac)
{
	INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
}
/* Start the beacon watchdog; called from zd_op_start(). */
static void beacon_enable(struct zd_mac *mac)
{
	dev_dbg_f(zd_mac_dev(mac), "\n");

	mac->beacon.last_update = jiffies;
	queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
			   BEACON_WATCHDOG_DELAY);
}
/* Stop the beacon watchdog and wait for a running instance to finish. */
static void beacon_disable(struct zd_mac *mac)
{
	dev_dbg_f(zd_mac_dev(mac), "\n");
	cancel_delayed_work_sync(&mac->beacon.watchdog_work);
}
/* Update the link LED once per second. */
#define LINK_LED_WORK_DELAY HZ

/* link_led_handler - periodic work that reflects association state on
 * the device LED (associated vs. scanning).  Always re-arms itself.
 */
static void link_led_handler(struct work_struct *work)
{
	struct zd_mac *mac =
		container_of(work, struct zd_mac, housekeeping.link_led_work.work);
	struct zd_chip *chip = &mac->chip;
	int is_associated;
	int r;

	if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
		goto requeue;

	spin_lock_irq(&mac->lock);
	is_associated = mac->associated;
	spin_unlock_irq(&mac->lock);

	r = zd_chip_control_leds(chip,
		                 is_associated ? ZD_LED_ASSOCIATED : ZD_LED_SCANNING);
	if (r)
		dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);

requeue:
	queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
		           LINK_LED_WORK_DELAY);
}
/* Initialize the link-LED delayed work (not yet queued). */
static void housekeeping_init(struct zd_mac *mac)
{
	INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
}
/* Kick off the link-LED work immediately; called from zd_op_start(). */
static void housekeeping_enable(struct zd_mac *mac)
{
	dev_dbg_f(zd_mac_dev(mac), "\n");
	queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
			   0);
}
/* Stop the link-LED work and switch the LED off. */
static void housekeeping_disable(struct zd_mac *mac)
{
	dev_dbg_f(zd_mac_dev(mac), "\n");
	cancel_delayed_work_sync(&mac->housekeeping.link_led_work);
	zd_chip_control_leds(&mac->chip, ZD_LED_OFF);
}
/*
* An implementation of a loadable kernel mode driver providing
* multiple kernel/user space bidirectional communications links.
*
* Author: Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Adapted to become the Linux 2.0 Coda pseudo device
* Peter Braam <braam@maths.ox.ac.uk>
* Michael Callahan <mjc@emmy.smith.edu>
*
* Changes for Linux 2.1
* Copyright (c) 1997 Carnegie-Mellon University
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/pid_namespace.h>
#include <asm/io.h>
#include <asm/poll.h>
#include <asm/uaccess.h>
#include <linux/coda.h>
#include <linux/coda_psdev.h>
#include "coda_linux.h"
#include "coda_int.h"
/* statistics */
int coda_hard; /* allows signals during upcalls */
unsigned long coda_timeout = 30; /* .. secs, then signals will dequeue */
struct venus_comm coda_comms[MAX_CODADEVS];
static struct class *coda_psdev_class;
/*
 * Device operations
 */

/* Poll handler for the coda character device: the device is always
 * writable; it becomes readable once an upcall request is pending for
 * Venus on vc_pending. */
static unsigned int coda_psdev_poll(struct file *file, poll_table * wait)
{
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(file, &vcp->vc_waitq, wait);

	mutex_lock(&vcp->vc_mutex);
	if (!list_empty(&vcp->vc_pending))
		mask |= POLLIN | POLLRDNORM;
	mutex_unlock(&vcp->vc_mutex);

	return mask;
}
/* coda_psdev_ioctl - ioctl handler for the coda character device.
 *
 * Only CIOC_KERNEL_VERSION is supported: it copies the kernel-side
 * protocol version to userspace.  Everything else gets -ENOTTY.
 * (The unreachable "return 0;" after the switch, in which every case
 * already returned, has been removed.)
 */
static long coda_psdev_ioctl(struct file * filp, unsigned int cmd, unsigned long arg)
{
	unsigned int data;

	switch(cmd) {
	case CIOC_KERNEL_VERSION:
		data = CODA_KERNEL_VERSION;
		return put_user(data, (int __user *) arg);
	default:
		return -ENOTTY;
	}
}
/*
 * Receive a message written by Venus to the psdev
 *
 * Two cases: a downcall (cache invalidation etc.) initiated by Venus,
 * which is decoded and dispatched to coda_downcall(); or the reply to
 * an earlier kernel upcall, which is matched by its unique id on the
 * processing queue, copied into the waiting request's buffer, and the
 * sleeper woken.
 */
static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
				size_t nbytes, loff_t *off)
{
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	struct upc_req *req = NULL;
	struct upc_req *tmp;
	struct list_head *lh;
	struct coda_in_hdr hdr;
	ssize_t retval = 0, count = 0;
	int error;

	/* Peek at the opcode, uniquefier */
	/* NOTE(review): on 64-bit kernels 2 * sizeof(u_long) is 16 bytes,
	 * although only the two 32-bit opcode/unique fields are needed
	 * here; this requires every write to be at least 16 bytes long --
	 * verify against the Venus protocol (2 * sizeof(u_int32_t) would
	 * be the minimal peek). */
	if (copy_from_user(&hdr, buf, 2 * sizeof(u_long)))
	        return -EFAULT;

	if (DOWNCALL(hdr.opcode)) {
		union outputArgs *dcbuf;
		int size = sizeof(*dcbuf);

		if ( nbytes < sizeof(struct coda_out_hdr) ) {
		        printk("coda_downcall opc %d uniq %d, not enough!\n",
			       hdr.opcode, hdr.unique);
			count = nbytes;
			goto out;
		}
		/* Clamp over-long downcalls to the union size. */
		if ( nbytes > size ) {
		        printk("Coda: downcall opc %d, uniq %d, too much!",
			       hdr.opcode, hdr.unique);
		        nbytes = size;
		}
		CODA_ALLOC(dcbuf, union outputArgs *, nbytes);
		if (copy_from_user(dcbuf, buf, nbytes)) {
			CODA_FREE(dcbuf, nbytes);
			retval = -EFAULT;
			goto out;
		}

		/* what downcall errors does Venus handle ? */
		error = coda_downcall(vcp, hdr.opcode, dcbuf);

		CODA_FREE(dcbuf, nbytes);
		if (error) {
		        printk("psdev_write: coda_downcall error: %d\n", error);
			retval = error;
			goto out;
		}
		count = nbytes;
		goto out;
	}

	/* Look for the message on the processing queue. */
	mutex_lock(&vcp->vc_mutex);
	list_for_each(lh, &vcp->vc_processing) {
		tmp = list_entry(lh, struct upc_req , uc_chain);
		if (tmp->uc_unique == hdr.unique) {
			req = tmp;
			list_del(&req->uc_chain);
			break;
		}
	}
	mutex_unlock(&vcp->vc_mutex);

	if (!req) {
		printk("psdev_write: msg (%d, %d) not found\n",
			hdr.opcode, hdr.unique);
		retval = -ESRCH;
		goto out;
	}

	/* move data into response buffer. */
	if (req->uc_outSize < nbytes) {
		printk("psdev_write: too much cnt: %d, cnt: %ld, opc: %d, uniq: %d.\n",
		       req->uc_outSize, (long)nbytes, hdr.opcode, hdr.unique);
		nbytes = req->uc_outSize; /* don't have more space! */
	}
	if (copy_from_user(req->uc_data, buf, nbytes)) {
		/* Mark the request aborted so the sleeper doesn't trust
		 * the partially-copied reply. */
		req->uc_flags |= CODA_REQ_ABORT;
		wake_up(&req->uc_sleep);
		retval = -EFAULT;
		goto out;
	}

	/* adjust outsize. is this useful ?? */
	req->uc_outSize = nbytes;
	req->uc_flags |= CODA_REQ_WRITE;
	count = nbytes;

	/* Convert filedescriptor into a file handle */
	if (req->uc_opcode == CODA_OPEN_BY_FD) {
		struct coda_open_by_fd_out *outp =
			(struct coda_open_by_fd_out *)req->uc_data;
		if (!outp->oh.result)
			outp->fh = fget(outp->fd);
	}

	wake_up(&req->uc_sleep);
out:
	return(count ? count : retval);
}
/*
 * Read a message from the kernel to Venus.
 *
 * Blocks (unless O_NONBLOCK) until an upcall appears on vc_pending, copies
 * its input arguments to userspace and, for synchronous requests, moves it
 * to vc_processing so the reply written back later can be matched.
 * Asynchronous (signal) requests are freed immediately after the copy.
 */
static ssize_t coda_psdev_read(struct file * file, char __user * buf,
			       size_t nbytes, loff_t *off)
{
	DECLARE_WAITQUEUE(wait, current);
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	struct upc_req *req;
	ssize_t retval = 0, count = 0;

	if (nbytes == 0)
		return 0;

	mutex_lock(&vcp->vc_mutex);

	/* Classic open-coded sleep: register on the waitqueue, then re-check
	 * the condition with the mutex held on every iteration. */
	add_wait_queue(&vcp->vc_waitq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (list_empty(&vcp->vc_pending)) {
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		/* Drop the mutex while sleeping so writers can queue work. */
		mutex_unlock(&vcp->vc_mutex);
		schedule();
		mutex_lock(&vcp->vc_mutex);
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcp->vc_waitq, &wait);

	if (retval)
		goto out;

	req = list_entry(vcp->vc_pending.next, struct upc_req,uc_chain);
	list_del(&req->uc_chain);

	/* Move the input args into userspace */
	count = req->uc_inSize;
	if (nbytes < req->uc_inSize) {
		/* Venus supplied too small a buffer; the message is
		 * truncated to nbytes. */
		printk ("psdev_read: Venus read %ld bytes of %d in message\n",
			(long)nbytes, req->uc_inSize);
		count = nbytes;
	}

	if (copy_to_user(buf, req->uc_data, count))
		retval = -EFAULT;

	/* If request was not a signal, enqueue and don't free */
	if (!(req->uc_flags & CODA_REQ_ASYNC)) {
		req->uc_flags |= CODA_REQ_READ;
		list_add_tail(&(req->uc_chain), &vcp->vc_processing);
		goto out;
	}

	CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
	kfree(req);
out:
	mutex_unlock(&vcp->vc_mutex);
	return (count ? count : retval);
}
/*
 * Open a psdev minor. Only allowed from the initial pid and user
 * namespaces, and only while no other Venus instance already holds
 * this channel (-EBUSY otherwise).
 */
static int coda_psdev_open(struct inode * inode, struct file * file)
{
	struct venus_comm *vcomm;
	int minor;
	int ret = -EBUSY;

	/* Venus must run in the initial namespaces. */
	if (task_active_pid_ns(current) != &init_pid_ns ||
	    current_user_ns() != &init_user_ns)
		return -EINVAL;

	minor = iminor(inode);
	if (minor < 0 || minor >= MAX_CODADEVS)
		return -ENODEV;

	vcomm = &coda_comms[minor];
	mutex_lock(&vcomm->vc_mutex);
	if (!vcomm->vc_inuse) {
		vcomm->vc_inuse++;

		/* Fresh channel state for this Venus instance. */
		INIT_LIST_HEAD(&vcomm->vc_pending);
		INIT_LIST_HEAD(&vcomm->vc_processing);
		init_waitqueue_head(&vcomm->vc_waitq);
		vcomm->vc_sb = NULL;
		vcomm->vc_seq = 0;

		file->private_data = vcomm;
		ret = 0;
	}
	mutex_unlock(&vcomm->vc_mutex);

	return ret;
}
/*
 * Close a psdev channel: abort every queued and in-flight upcall so the
 * blocked client processes return, then mark the channel free again.
 */
static int coda_psdev_release(struct inode * inode, struct file * file)
{
	struct venus_comm *vcp = (struct venus_comm *) file->private_data;
	struct upc_req *req, *tmp;

	if (!vcp || !vcp->vc_inuse ) {
		printk("psdev_release: Not open.\n");
		return -1;
	}

	mutex_lock(&vcp->vc_mutex);

	/* Wakeup clients so they can return. */
	list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) {
		list_del(&req->uc_chain);

		/* Async requests need to be freed here */
		if (req->uc_flags & CODA_REQ_ASYNC) {
			CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
			kfree(req);
			continue;
		}
		req->uc_flags |= CODA_REQ_ABORT;
		wake_up(&req->uc_sleep);
	}

	/* Requests already read by Venus but never answered: abort them too. */
	list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) {
		list_del(&req->uc_chain);
		req->uc_flags |= CODA_REQ_ABORT;
		wake_up(&req->uc_sleep);
	}

	file->private_data = NULL;
	vcp->vc_inuse--;
	mutex_unlock(&vcp->vc_mutex);
	return 0;
}
/* File operations for the Coda pseudo-device (Venus <-> kernel channel). */
static const struct file_operations coda_psdev_fops = {
	.owner		= THIS_MODULE,
	.read		= coda_psdev_read,
	.write		= coda_psdev_write,
	.poll		= coda_psdev_poll,
	.unlocked_ioctl	= coda_psdev_ioctl,
	.open		= coda_psdev_open,
	.release	= coda_psdev_release,
	.llseek		= noop_llseek,
};
/*
 * Register the Coda character-device major, create the device class and
 * the /dev/cfs<n> nodes, initialise per-channel mutexes, and set up the
 * sysctl entries.
 *
 * Returns 0 on success or a negative errno.
 *
 * Cleanup: simplified the convoluted `&(&coda_comms[i])->vc_mutex`
 * expression to the equivalent `&coda_comms[i].vc_mutex`.
 */
static int init_coda_psdev(void)
{
	int i, err = 0;

	if (register_chrdev(CODA_PSDEV_MAJOR, "coda", &coda_psdev_fops)) {
		printk(KERN_ERR "coda_psdev: unable to get major %d\n",
		       CODA_PSDEV_MAJOR);
		return -EIO;
	}
	coda_psdev_class = class_create(THIS_MODULE, "coda");
	if (IS_ERR(coda_psdev_class)) {
		err = PTR_ERR(coda_psdev_class);
		goto out_chrdev;
	}
	for (i = 0; i < MAX_CODADEVS; i++) {
		mutex_init(&coda_comms[i].vc_mutex);
		/* NOTE(review): device_create() failures are ignored here, as
		 * in the original; a missing node only affects userspace. */
		device_create(coda_psdev_class, NULL,
			      MKDEV(CODA_PSDEV_MAJOR, i), NULL, "cfs%d", i);
	}
	coda_sysctl_init();
	goto out;

out_chrdev:
	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
out:
	return err;
}
/* Module metadata for the Coda pseudo-device driver. */
MODULE_AUTHOR("Jan Harkes, Peter J. Braam");
MODULE_DESCRIPTION("Coda Distributed File System VFS interface");
MODULE_ALIAS_CHARDEV_MAJOR(CODA_PSDEV_MAJOR);
MODULE_LICENSE("GPL");
MODULE_VERSION("6.6");
/*
 * Module init: set up the inode cache, the psdev channels, and finally
 * register the coda filesystem type. Uses the classic goto-cleanup chain
 * to unwind in reverse order on failure.
 */
static int __init init_coda(void)
{
	int status;
	int i;

	status = coda_init_inodecache();
	if (status)
		goto out2;
	status = init_coda_psdev();
	if ( status ) {
		printk("Problem (%d) in init_coda_psdev\n", status);
		goto out1;
	}

	status = register_filesystem(&coda_fs_type);
	if (status) {
		printk("coda: failed to register filesystem!\n");
		goto out;
	}
	return 0;
out:
	/* Undo init_coda_psdev(): nodes, class, chrdev, sysctl. */
	for (i = 0; i < MAX_CODADEVS; i++)
		device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i));
	class_destroy(coda_psdev_class);
	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
	coda_sysctl_clean();
out1:
	coda_destroy_inodecache();
out2:
	return status;
}
/*
 * Module exit: tear down in reverse order of init_coda() — filesystem,
 * device nodes, class, character device, sysctl, inode cache.
 */
static void __exit exit_coda(void)
{
	int err, i;

	err = unregister_filesystem(&coda_fs_type);
	if ( err != 0 ) {
		printk("coda: failed to unregister filesystem\n");
	}
	for (i = 0; i < MAX_CODADEVS; i++)
		device_destroy(coda_psdev_class, MKDEV(CODA_PSDEV_MAJOR, i));
	class_destroy(coda_psdev_class);
	unregister_chrdev(CODA_PSDEV_MAJOR, "coda");
	coda_sysctl_clean();
	coda_destroy_inodecache();
}

module_init(init_coda);
module_exit(exit_coda);
| gpl-2.0 |
ExpressOS/third_party-l4android | drivers/media/dvb/siano/smscoreapi.c | 2675 | 42885 | /*
* Siano core API module
*
* This file contains implementation for the interface to sms core component
*
* author: Uri Shkolnik
*
* Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
*
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/wait.h>
#include <asm/byteorder.h>
#include "smscoreapi.h"
#include "sms-cards.h"
#include "smsir.h"
#include "smsendian.h"
/* Debug verbosity, writable at runtime via the "debug" module parameter. */
static int sms_dbg;
module_param_named(debug, sms_dbg, int, 0644);
MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
/* One registered hotplug callback on the global notifyee list. */
struct smscore_device_notifyee_t {
	struct list_head entry;
	hotplug_t hotplug;
};

/* (id, data_type) pair a client has subscribed to; see smscore_find_client. */
struct smscore_idlist_t {
	struct list_head entry;
	int		id;
	int		data_type;
};

/* Per-client state: subscription list plus response/removal callbacks. */
struct smscore_client_t {
	struct list_head entry;
	struct smscore_device_t *coredev;
	void			*context;
	struct list_head 	idlist;
	onresponse_t	onresponse_handler;
	onremove_t		onremove_handler;
};
/* Record the board id detected for this core device. */
void smscore_set_board_id(struct smscore_device_t *core, int id)
{
	core->board_id = id;
}
/*
 * Get/set the cached LED state: a negative @led only queries, a
 * non-negative value is stored first. Returns the current state.
 */
int smscore_led_state(struct smscore_device_t *core, int led)
{
	if (led >= 0)
		core->led_state = led;
	return core->led_state;
}
EXPORT_SYMBOL_GPL(smscore_set_board_id);

/* Return the board id previously stored by smscore_set_board_id(). */
int smscore_get_board_id(struct smscore_device_t *core)
{
	return core->board_id;
}
EXPORT_SYMBOL_GPL(smscore_get_board_id);
/* Per-devpath registry entry remembering mode and device type across
 * reconnects (USB devices may drop off the bus when changing mode). */
struct smscore_registry_entry_t {
	struct list_head entry;
	char			devpath[32];
	int				mode;
	enum sms_device_type_st	type;
};

/* Global lists and their locks; initialised by the module init code. */
static struct list_head g_smscore_notifyees;
static struct list_head g_smscore_devices;
static struct mutex g_smscore_deviceslock;

static struct list_head g_smscore_registry;
static struct mutex g_smscore_registrylock;

/* Firmware id (device mode) used for devices with no registry entry. */
static int default_mode = 4;

module_param(default_mode, int, 0644);
MODULE_PARM_DESC(default_mode, "default firmware id (device mode)");
/*
 * Look up the registry entry for @devpath, creating one (initialised with
 * default_mode) if it does not exist. Returns NULL only if the allocation
 * for a new entry fails. Walks and mutates the list under the registry
 * mutex.
 */
static struct smscore_registry_entry_t *smscore_find_registry(char *devpath)
{
	struct smscore_registry_entry_t *entry;
	struct list_head *next;

	kmutex_lock(&g_smscore_registrylock);
	for (next = g_smscore_registry.next;
	     next != &g_smscore_registry;
	     next = next->next) {
		entry = (struct smscore_registry_entry_t *) next;
		if (!strcmp(entry->devpath, devpath)) {
			kmutex_unlock(&g_smscore_registrylock);
			return entry;
		}
	}
	/* Not found: create a fresh entry for this device path. */
	entry = kmalloc(sizeof(struct smscore_registry_entry_t), GFP_KERNEL);
	if (entry) {
		entry->mode = default_mode;
		strcpy(entry->devpath, devpath);
		list_add(&entry->entry, &g_smscore_registry);
	} else
		sms_err("failed to create smscore_registry.");
	kmutex_unlock(&g_smscore_registrylock);
	return entry;
}
/*
 * Return the remembered mode for @devpath, falling back to default_mode
 * when no registry entry can be found or created.
 */
int smscore_registry_getmode(char *devpath)
{
	struct smscore_registry_entry_t *ent;

	ent = smscore_find_registry(devpath);
	if (!ent) {
		sms_err("No registry found.");
		return default_mode;
	}
	return ent->mode;
}
EXPORT_SYMBOL_GPL(smscore_registry_getmode);
/*
 * Return the remembered device type for @devpath, or -1 when no registry
 * entry can be found or created.
 */
static enum sms_device_type_st smscore_registry_gettype(char *devpath)
{
	struct smscore_registry_entry_t *ent;

	ent = smscore_find_registry(devpath);
	if (!ent) {
		sms_err("No registry found.");
		return -1;
	}
	return ent->type;
}
/* Remember @mode for @devpath in the registry (no-op if lookup fails). */
void smscore_registry_setmode(char *devpath, int mode)
{
	struct smscore_registry_entry_t *ent;

	ent = smscore_find_registry(devpath);
	if (ent)
		ent->mode = mode;
	else
		sms_err("No registry found.");
}
/* Remember the device @type for @devpath (no-op if lookup fails). */
static void smscore_registry_settype(char *devpath,
				     enum sms_device_type_st type)
{
	struct smscore_registry_entry_t *ent;

	ent = smscore_find_registry(devpath);
	if (ent)
		ent->type = type;
	else
		sms_err("No registry found.");
}
/* Insert @new at the head of @head while holding @lock (irq-safe). */
static void list_add_locked(struct list_head *new, struct list_head *head,
			    spinlock_t *lock)
{
	unsigned long irq_flags;

	spin_lock_irqsave(lock, irq_flags);

	list_add(new, head);

	spin_unlock_irqrestore(lock, irq_flags);
}
/**
 * register a client callback that called when device plugged in/unplugged
 * NOTE: if devices exist callback is called immediately for each device
 *
 * @param hotplug callback
 *
 * @return 0 on success, <0 on error.
 */
int smscore_register_hotplug(hotplug_t hotplug)
{
	struct smscore_device_notifyee_t *notifyee;
	struct list_head *next, *first;
	int rc = 0;

	kmutex_lock(&g_smscore_deviceslock);

	notifyee = kmalloc(sizeof(struct smscore_device_notifyee_t),
			   GFP_KERNEL);
	if (notifyee) {
		/* now notify callback about existing devices */
		first = &g_smscore_devices;
		for (next = first->next;
		     next != first && !rc;
		     next = next->next) {
			struct smscore_device_t *coredev =
				(struct smscore_device_t *) next;
			rc = hotplug(coredev, coredev->device, 1);
		}

		if (rc >= 0) {
			/* Only add the callback once all notifications
			 * succeeded. */
			notifyee->hotplug = hotplug;
			list_add(&notifyee->entry, &g_smscore_notifyees);
		} else
			kfree(notifyee);
	} else
		rc = -ENOMEM;

	kmutex_unlock(&g_smscore_deviceslock);

	return rc;
}
EXPORT_SYMBOL_GPL(smscore_register_hotplug);
/**
 * unregister a client callback that called when device plugged in/unplugged
 *
 * @param hotplug callback
 *
 */
void smscore_unregister_hotplug(hotplug_t hotplug)
{
	struct list_head *next, *first;

	kmutex_lock(&g_smscore_deviceslock);

	first = &g_smscore_notifyees;

	/* Remove every registration matching @hotplug; the iterator is
	 * advanced before a possible list_del. */
	for (next = first->next; next != first;) {
		struct smscore_device_notifyee_t *notifyee =
			(struct smscore_device_notifyee_t *) next;
		next = next->next;

		if (notifyee->hotplug == hotplug) {
			list_del(&notifyee->entry);
			kfree(notifyee);
		}
	}

	kmutex_unlock(&g_smscore_deviceslock);
}
EXPORT_SYMBOL_GPL(smscore_unregister_hotplug);
/*
 * Invoke every remaining client's onremove handler; each handler is
 * expected to unlink its client, which drains the list.
 */
static void smscore_notify_clients(struct smscore_device_t *coredev)
{
	struct smscore_client_t *client;

	/* the client must call smscore_unregister_client from remove handler */
	while (!list_empty(&coredev->clients)) {
		client = (struct smscore_client_t *) coredev->clients.next;
		client->onremove_handler(client->context);
	}
}
/*
 * Call every registered hotplug callback for @coredev, stopping at the
 * first negative return. @arrival is 1 for plug-in, 0 for removal.
 */
static int smscore_notify_callbacks(struct smscore_device_t *coredev,
				    struct device *device, int arrival)
{
	struct list_head *next, *first;
	int rc = 0;

	/* note: must be called under g_deviceslock */

	first = &g_smscore_notifyees;

	for (next = first->next; next != first; next = next->next) {
		rc = ((struct smscore_device_notifyee_t *) next)->
				hotplug(coredev, device, arrival);
		if (rc < 0)
			break;
	}

	return rc;
}
/*
 * Allocate a buffer descriptor for the slice of the DMA-coherent common
 * buffer starting at @buffer, recording its offset and bus address.
 * Returns NULL on allocation failure.
 */
static struct
smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
				       dma_addr_t common_buffer_phys)
{
	struct smscore_buffer_t *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		sms_info("kmalloc(...) failed");
		return NULL;
	}

	cb->p = buffer;
	cb->offset_in_common = buffer - (u8 *) common_buffer;
	cb->phys = common_buffer_phys + cb->offset_in_common;

	return cb;
}
/**
 * creates coredev object for a device, prepares buffers,
 * creates buffer mappings, notifies registered hotplugs about new device.
 *
 * @param params device pointer to struct with device specific parameters
 *               and handlers
 * @param coredev pointer to a value that receives created coredev object
 *
 * @return 0 on success, <0 on error.
 */
int smscore_register_device(struct smsdevice_params_t *params,
			    struct smscore_device_t **coredev)
{
	struct smscore_device_t *dev;
	u8 *buffer;

	dev = kzalloc(sizeof(struct smscore_device_t), GFP_KERNEL);
	if (!dev) {
		sms_info("kzalloc(...) failed");
		return -ENOMEM;
	}

	/* init list entry so it could be safe in smscore_unregister_device */
	INIT_LIST_HEAD(&dev->entry);

	/* init queues */
	INIT_LIST_HEAD(&dev->clients);
	INIT_LIST_HEAD(&dev->buffers);

	/* init locks */
	spin_lock_init(&dev->clientslock);
	spin_lock_init(&dev->bufferslock);

	/* init completion events */
	init_completion(&dev->version_ex_done);
	init_completion(&dev->data_download_done);
	init_completion(&dev->trigger_done);
	init_completion(&dev->init_device_done);
	init_completion(&dev->reload_start_done);
	init_completion(&dev->resume_done);
	init_completion(&dev->gpio_configuration_done);
	init_completion(&dev->gpio_set_level_done);
	init_completion(&dev->gpio_get_level_done);
	init_completion(&dev->ir_init_done);

	/* Buffer management */
	init_waitqueue_head(&dev->buffer_mng_waitq);

	/* alloc common buffer */
	dev->common_buffer_size = params->buffer_size * params->num_buffers;
	dev->common_buffer = dma_alloc_coherent(NULL, dev->common_buffer_size,
						&dev->common_buffer_phys,
						GFP_KERNEL | GFP_DMA);
	if (!dev->common_buffer) {
		smscore_unregister_device(dev);
		return -ENOMEM;
	}

	/* prepare dma buffers */
	for (buffer = dev->common_buffer;
	     dev->num_buffers < params->num_buffers;
	     dev->num_buffers++, buffer += params->buffer_size) {
		struct smscore_buffer_t *cb =
			smscore_createbuffer(buffer, dev->common_buffer,
					     dev->common_buffer_phys);
		if (!cb) {
			/* unregister frees whatever was set up so far */
			smscore_unregister_device(dev);
			return -ENOMEM;
		}

		smscore_putbuffer(dev, cb);
	}

	sms_info("allocated %d buffers", dev->num_buffers);

	dev->mode = DEVICE_MODE_NONE;
	dev->context = params->context;
	dev->device = params->device;
	dev->setmode_handler = params->setmode_handler;
	dev->detectmode_handler = params->detectmode_handler;
	dev->sendrequest_handler = params->sendrequest_handler;
	dev->preload_handler = params->preload_handler;
	dev->postload_handler = params->postload_handler;

	dev->device_flags = params->flags;
	strcpy(dev->devpath, params->devpath);

	smscore_registry_settype(dev->devpath, params->device_type);

	/* add device to devices list */
	kmutex_lock(&g_smscore_deviceslock);
	list_add(&dev->entry, &g_smscore_devices);
	kmutex_unlock(&g_smscore_deviceslock);

	*coredev = dev;

	sms_info("device %p created", dev);

	return 0;
}
EXPORT_SYMBOL_GPL(smscore_register_device);
/*
 * Send a request to the device and block until the matching completion
 * fires or the protocol round-trip timeout expires.
 * Returns 0 on success, the handler's error, or -ETIME on timeout.
 */
static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
		void *buffer, size_t size, struct completion *completion) {
	int rc;

	rc = coredev->sendrequest_handler(coredev->context, buffer, size);
	if (rc < 0) {
		sms_info("sendrequest returned error %d", rc);
		return rc;
	}

	if (wait_for_completion_timeout(completion,
			msecs_to_jiffies(SMS_PROTOCOL_MAX_RAOUNDTRIP_MS)))
		return 0;

	return -ETIME;
}
/**
 * Starts & enables IR operations
 *
 * Only runs when the board configuration reports an IR port; otherwise the
 * IR sub-module is skipped entirely. Always returns 0 — IR failures are
 * logged but do not fail device startup.
 *
 * @return 0 on success, < 0 on error.
 */
static int smscore_init_ir(struct smscore_device_t *coredev)
{
	int ir_io;
	int rc;
	void *buffer;

	coredev->ir.dev = NULL;
	ir_io = sms_get_board(smscore_get_board_id(coredev))->board_cfg.ir;
	if (ir_io) {/* only if IR port exist we use IR sub-module */
		sms_info("IR loading");
		rc = sms_ir_init(coredev);

		if (rc != 0)
			sms_err("Error initialization DTV IR sub-module");
		else {
			/* Over-allocate so the message can be aligned for
			 * DMA with SMS_ALIGN_ADDRESS. */
			buffer = kmalloc(sizeof(struct SmsMsgData_ST2) +
						SMS_DMA_ALIGNMENT,
						GFP_KERNEL | GFP_DMA);
			if (buffer) {
				struct SmsMsgData_ST2 *msg =
				(struct SmsMsgData_ST2 *)
				SMS_ALIGN_ADDRESS(buffer);

				SMS_INIT_MSG(&msg->xMsgHeader,
						MSG_SMS_START_IR_REQ,
						sizeof(struct SmsMsgData_ST2));
				msg->msgData[0] = coredev->ir.controller;
				msg->msgData[1] = coredev->ir.timeout;

				smsendian_handle_tx_message(
					(struct SmsMsgHdr_ST2 *)msg);
				rc = smscore_sendrequest_and_wait(coredev, msg,
						msg->xMsgHeader. msgLength,
						&coredev->ir_init_done);

				kfree(buffer);
			} else
				sms_err
				("Sending IR initialization message failed");
		}
	} else
		sms_info("IR port has not been detected");

	return 0;
}
/**
 * sets initial device mode and notifies client hotplugs that device is ready
 *
 * @param coredev pointer to a coredev object returned by
 *                smscore_register_device
 *
 * @return 0 on success, <0 on error.
 */
int smscore_start_device(struct smscore_device_t *coredev)
{
	int rc = smscore_set_device_mode(
			coredev, smscore_registry_getmode(coredev->devpath));
	if (rc < 0) {
		sms_info("set device mode faile , rc %d", rc);
		return rc;
	}

	kmutex_lock(&g_smscore_deviceslock);

	/* Tell registered hotplugs this device has arrived, then bring up
	 * IR (IR failure does not affect rc). */
	rc = smscore_notify_callbacks(coredev, coredev->device, 1);
	smscore_init_ir(coredev);

	sms_info("device %p started, rc %d", coredev, rc);

	kmutex_unlock(&g_smscore_deviceslock);

	return rc;
}
EXPORT_SYMBOL_GPL(smscore_start_device);
/*
 * Download a family-2 firmware image: optional preload hook, optional
 * reload request (if a firmware is already running), chunked data
 * download, then either a software trigger (cold start) or a reload-exec
 * request, and finally the optional postload hook.
 */
static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
					 void *buffer, size_t size)
{
	struct SmsFirmware_ST *firmware = (struct SmsFirmware_ST *) buffer;
	struct SmsMsgHdr_ST *msg;
	u32 mem_address;
	u8 *payload = firmware->Payload;
	int rc = 0;

	/* Image header fields are little-endian on disk. */
	firmware->StartAddress = le32_to_cpu(firmware->StartAddress);
	firmware->Length = le32_to_cpu(firmware->Length);

	mem_address = firmware->StartAddress;

	sms_info("loading FW to addr 0x%x size %d",
		 mem_address, firmware->Length);
	if (coredev->preload_handler) {
		rc = coredev->preload_handler(coredev->context);
		if (rc < 0)
			return rc;
	}

	/* PAGE_SIZE buffer shall be enough and dma aligned */
	msg = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
	if (!msg)
		return -ENOMEM;

	if (coredev->mode != DEVICE_MODE_NONE) {
		sms_debug("sending reload command.");
		SMS_INIT_MSG(msg, MSG_SW_RELOAD_START_REQ,
			     sizeof(struct SmsMsgHdr_ST));
		rc = smscore_sendrequest_and_wait(coredev, msg,
						  msg->msgLength,
						  &coredev->reload_start_done);
		/* NOTE(review): the target address is taken from a fixed
		 * payload offset here — presumably dictated by the firmware
		 * image layout; confirm against the image format. */
		mem_address = *(u32 *) &payload[20];
	}

	/* Stream the payload in SMS_MAX_PAYLOAD_SIZE chunks. */
	while (size && rc >= 0) {
		struct SmsDataDownload_ST *DataMsg =
			(struct SmsDataDownload_ST *) msg;
		int payload_size = min((int) size, SMS_MAX_PAYLOAD_SIZE);

		SMS_INIT_MSG(msg, MSG_SMS_DATA_DOWNLOAD_REQ,
			     (u16)(sizeof(struct SmsMsgHdr_ST) +
				      sizeof(u32) + payload_size));

		DataMsg->MemAddr = mem_address;
		memcpy(DataMsg->Payload, payload, payload_size);

		/* ROM bootloaders may not acknowledge downloads. */
		if ((coredev->device_flags & SMS_ROM_NO_RESPONSE) &&
		    (coredev->mode == DEVICE_MODE_NONE))
			rc = coredev->sendrequest_handler(
				coredev->context, DataMsg,
				DataMsg->xMsgHeader.msgLength);
		else
			rc = smscore_sendrequest_and_wait(
				coredev, DataMsg,
				DataMsg->xMsgHeader.msgLength,
				&coredev->data_download_done);

		payload += payload_size;
		size -= payload_size;
		mem_address += payload_size;
	}

	if (rc >= 0) {
		if (coredev->mode == DEVICE_MODE_NONE) {
			/* Cold start: hand control to the new image. */
			struct SmsMsgData_ST *TriggerMsg =
				(struct SmsMsgData_ST *) msg;

			SMS_INIT_MSG(msg, MSG_SMS_SWDOWNLOAD_TRIGGER_REQ,
				     sizeof(struct SmsMsgHdr_ST) +
				     sizeof(u32) * 5);

			TriggerMsg->msgData[0] = firmware->StartAddress;
						/* Entry point */
			TriggerMsg->msgData[1] = 5; /* Priority */
			TriggerMsg->msgData[2] = 0x200; /* Stack size */
			TriggerMsg->msgData[3] = 0; /* Parameter */
			TriggerMsg->msgData[4] = 4; /* Task ID */

			if (coredev->device_flags & SMS_ROM_NO_RESPONSE) {
				rc = coredev->sendrequest_handler(
					coredev->context, TriggerMsg,
					TriggerMsg->xMsgHeader.msgLength);
				msleep(100);
			} else
				rc = smscore_sendrequest_and_wait(
					coredev, TriggerMsg,
					TriggerMsg->xMsgHeader.msgLength,
					&coredev->trigger_done);
		} else {
			SMS_INIT_MSG(msg, MSG_SW_RELOAD_EXEC_REQ,
				     sizeof(struct SmsMsgHdr_ST));
			rc = coredev->sendrequest_handler(coredev->context,
							  msg, msg->msgLength);
		}
		/* Give the firmware time to come up. */
		msleep(500);
	}

	sms_debug("rc=%d, postload=%p ", rc,
		  coredev->postload_handler);

	kfree(msg);

	return ((rc >= 0) && coredev->postload_handler) ?
		coredev->postload_handler(coredev->context) :
		rc;
}
/**
 * loads specified firmware into a buffer and calls device loadfirmware_handler
 *
 * @param coredev pointer to a coredev object returned by
 *                smscore_register_device
 * @param filename null-terminated string specifies firmware file name
 * @param loadfirmware_handler device handler that loads firmware
 *
 * @return 0 on success, <0 on error.
 */
static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
					   char *filename,
					   loadfirmware_t loadfirmware_handler)
{
	int rc = -ENOENT;
	const struct firmware *fw;
	u8 *fw_buffer;

	/* A handler is mandatory unless the family-2 path is taken. */
	if (loadfirmware_handler == NULL && !(coredev->device_flags &
					      SMS_DEVICE_FAMILY2))
		return -EINVAL;

	rc = request_firmware(&fw, filename, coredev->device);
	if (rc < 0) {
		sms_info("failed to open \"%s\"", filename);
		return rc;
	}
	sms_info("read FW %s, size=%zd", filename, fw->size);
	/* Copy into a DMA-capable, aligned buffer before download. */
	fw_buffer = kmalloc(ALIGN(fw->size, SMS_ALLOC_ALIGNMENT),
			    GFP_KERNEL | GFP_DMA);
	if (fw_buffer) {
		memcpy(fw_buffer, fw->data, fw->size);

		rc = (coredev->device_flags & SMS_DEVICE_FAMILY2) ?
		      smscore_load_firmware_family2(coredev,
						    fw_buffer,
						    fw->size) :
		      loadfirmware_handler(coredev->context,
					   fw_buffer, fw->size);

		kfree(fw_buffer);
	} else {
		sms_info("failed to allocate firmware buffer");
		rc = -ENOMEM;
	}

	release_firmware(fw);

	return rc;
}
/**
 * notifies all clients registered with the device, notifies hotplugs,
 * frees all buffers and coredev object
 *
 * Waits (bounded retries) for all DMA buffers to come back to the pool
 * before tearing them down, then releases the common DMA buffer, any
 * cached firmware and the coredev itself.
 *
 * @param coredev pointer to a coredev object returned by
 *                smscore_register_device
 *
 * Fix: dropped the redundant `if (coredev->fw_buf != NULL)` guard —
 * kfree(NULL) is a documented no-op.
 */
void smscore_unregister_device(struct smscore_device_t *coredev)
{
	struct smscore_buffer_t *cb;
	int num_buffers = 0;
	int retry = 0;

	kmutex_lock(&g_smscore_deviceslock);

	/* Release input device (IR) resources */
	sms_ir_exit(coredev);

	smscore_notify_clients(coredev);
	smscore_notify_callbacks(coredev, NULL, 0);

	/* at this point all buffers should be back
	 * onresponse must no longer be called */

	while (1) {
		while (!list_empty(&coredev->buffers)) {
			cb = (struct smscore_buffer_t *) coredev->buffers.next;
			list_del(&cb->entry);
			kfree(cb);
			num_buffers++;
		}
		if (num_buffers == coredev->num_buffers)
			break;
		if (++retry > 10) {
			sms_info("exiting although "
				 "not all buffers released.");
			break;
		}

		sms_info("waiting for %d buffer(s)",
			 coredev->num_buffers - num_buffers);
		msleep(100);
	}

	sms_info("freed %d buffers", num_buffers);

	if (coredev->common_buffer)
		dma_free_coherent(NULL, coredev->common_buffer_size,
			coredev->common_buffer, coredev->common_buffer_phys);

	kfree(coredev->fw_buf);

	list_del(&coredev->entry);
	kfree(coredev);

	kmutex_unlock(&g_smscore_deviceslock);

	sms_info("device %p destroyed", coredev);
}
EXPORT_SYMBOL_GPL(smscore_unregister_device);
/*
 * Query the running firmware id via MSG_SMS_GET_VERSION_EX_REQ; on a first
 * timeout, wait for a resume indication and retry once.
 *
 * Fix: the original computed SMS_ALIGN_ADDRESS(buffer) before checking the
 * kmalloc() result, performing pointer arithmetic on a possibly-NULL
 * pointer (undefined behavior). The NULL check now precedes any use.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -ETIME, ...).
 */
static int smscore_detect_mode(struct smscore_device_t *coredev)
{
	void *buffer;
	struct SmsMsgHdr_ST *msg;
	int rc;

	buffer = kmalloc(sizeof(struct SmsMsgHdr_ST) + SMS_DMA_ALIGNMENT,
			 GFP_KERNEL | GFP_DMA);
	if (!buffer)
		return -ENOMEM;

	msg = (struct SmsMsgHdr_ST *) SMS_ALIGN_ADDRESS(buffer);

	SMS_INIT_MSG(msg, MSG_SMS_GET_VERSION_EX_REQ,
		     sizeof(struct SmsMsgHdr_ST));

	rc = smscore_sendrequest_and_wait(coredev, msg, msg->msgLength,
					  &coredev->version_ex_done);
	if (rc == -ETIME) {
		sms_err("MSG_SMS_GET_VERSION_EX_REQ failed first try");

		/* The device may still be resuming; give it 5s, then retry. */
		if (wait_for_completion_timeout(&coredev->resume_done,
						msecs_to_jiffies(5000))) {
			rc = smscore_sendrequest_and_wait(
				coredev, msg, msg->msgLength,
				&coredev->version_ex_done);
			if (rc < 0)
				sms_err("MSG_SMS_GET_VERSION_EX_REQ failed "
					"second try, rc %d", rc);
		} else
			rc = -ETIME;
	}

	kfree(buffer);

	return rc;
}
/*
 * Default firmware filename table, indexed [mode][device type].
 * "none" marks mode/type combinations with no firmware image.
 */
static char *smscore_fw_lkup[][SMS_NUM_OF_DEVICE_TYPES] = {
	/*Stellar		NOVA A0		Nova B0		VEGA*/
	/*DVBT*/
	{"none", "dvb_nova_12mhz.inp", "dvb_nova_12mhz_b0.inp", "none"},
	/*DVBH*/
	{"none", "dvb_nova_12mhz.inp", "dvb_nova_12mhz_b0.inp", "none"},
	/*TDMB*/
	{"none", "tdmb_nova_12mhz.inp", "tdmb_nova_12mhz_b0.inp", "none"},
	/*DABIP*/
	{"none", "none", "none", "none"},
	/*BDA*/
	{"none", "dvb_nova_12mhz.inp", "dvb_nova_12mhz_b0.inp", "none"},
	/*ISDBT*/
	{"none", "isdbt_nova_12mhz.inp", "isdbt_nova_12mhz_b0.inp", "none"},
	/*ISDBTBDA*/
	{"none", "isdbt_nova_12mhz.inp", "isdbt_nova_12mhz_b0.inp", "none"},
	/*CMMB*/
	{"none", "none", "none", "cmmb_vega_12mhz.inp"}
};
/*
 * Pick the firmware filename for (mode, type): a board-specific override
 * takes precedence, otherwise fall back to the default lookup table.
 */
static inline char *sms_get_fw_name(struct smscore_device_t *coredev,
				    int mode, enum sms_device_type_st type)
{
	char **board_fw = sms_get_board(smscore_get_board_id(coredev))->fw;

	if (board_fw && board_fw[mode])
		return board_fw[mode];
	return smscore_fw_lkup[mode][type];
}
/**
 * calls device handler to change mode of operation
 * NOTE: stellar/usb may disconnect when changing mode
 *
 * For family-2 devices this detects the running firmware, downloads a new
 * image if the requested mode is not already supported, and sends an
 * init-device request. Older devices delegate to their detectmode/setmode
 * handlers.
 *
 * @param coredev pointer to a coredev object returned by
 *                smscore_register_device
 * @param mode requested mode of operation
 *
 * @return 0 on success, <0 on error.
 */
int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
{
	void *buffer;
	int rc = 0;
	enum sms_device_type_st type;

	sms_debug("set device mode to %d", mode);
	if (coredev->device_flags & SMS_DEVICE_FAMILY2) {
		if (mode < DEVICE_MODE_DVBT || mode >= DEVICE_MODE_RAW_TUNER) {
			sms_err("invalid mode specified %d", mode);
			return -EINVAL;
		}

		smscore_registry_setmode(coredev->devpath, mode);

		if (!(coredev->device_flags & SMS_DEVICE_NOT_READY)) {
			rc = smscore_detect_mode(coredev);
			if (rc < 0) {
				sms_err("mode detect failed %d", rc);
				return rc;
			}
		}

		if (coredev->mode == mode) {
			sms_info("device mode %d already set", mode);
			return 0;
		}

		if (!(coredev->modes_supported & (1 << mode))) {
			char *fw_filename;

			type = smscore_registry_gettype(coredev->devpath);
			fw_filename = sms_get_fw_name(coredev, mode, type);

			rc = smscore_load_firmware_from_file(coredev,
							     fw_filename, NULL);
			if (rc < 0) {
				sms_warn("error %d loading firmware: %s, "
					 "trying again with default firmware",
					 rc, fw_filename);

				/* try again with the default firmware */
				fw_filename = smscore_fw_lkup[mode][type];
				rc = smscore_load_firmware_from_file(coredev,
							     fw_filename, NULL);

				if (rc < 0) {
					sms_warn("error %d loading "
						 "firmware: %s", rc,
						 fw_filename);
					return rc;
				}
			}
			sms_log("firmware download success: %s", fw_filename);
		} else
			sms_info("mode %d supported by running "
				 "firmware", mode);

		/* Over-allocate so the request can be DMA-aligned. */
		buffer = kmalloc(sizeof(struct SmsMsgData_ST) +
				 SMS_DMA_ALIGNMENT, GFP_KERNEL | GFP_DMA);
		if (buffer) {
			struct SmsMsgData_ST *msg =
				(struct SmsMsgData_ST *)
					SMS_ALIGN_ADDRESS(buffer);

			SMS_INIT_MSG(&msg->xMsgHeader, MSG_SMS_INIT_DEVICE_REQ,
				     sizeof(struct SmsMsgData_ST));
			msg->msgData[0] = mode;

			rc = smscore_sendrequest_and_wait(
				coredev, msg, msg->xMsgHeader.msgLength,
				&coredev->init_device_done);

			kfree(buffer);
		} else {
			sms_err("Could not allocate buffer for "
				"init device message.");
			rc = -ENOMEM;
		}
	} else {
		/* Non-family-2 devices only support the DVBT..DVBT_BDA
		 * range and use their own mode handlers. */
		if (mode < DEVICE_MODE_DVBT || mode > DEVICE_MODE_DVBT_BDA) {
			sms_err("invalid mode specified %d", mode);
			return -EINVAL;
		}

		smscore_registry_setmode(coredev->devpath, mode);

		if (coredev->detectmode_handler)
			coredev->detectmode_handler(coredev->context,
						    &coredev->mode);

		if (coredev->mode != mode && coredev->setmode_handler)
			rc = coredev->setmode_handler(coredev->context, mode);
	}

	if (rc >= 0) {
		coredev->mode = mode;
		coredev->device_flags &= ~SMS_DEVICE_NOT_READY;
	}

	if (rc < 0)
		sms_err("return error code %d.", rc);
	return rc;
}
/**
 * calls device handler to get current mode of operation
 *
 * @param coredev pointer to a coredev object returned by
 *                smscore_register_device
 *
 * @return current mode
 */
int smscore_get_device_mode(struct smscore_device_t *coredev)
{
	return coredev->mode;
}
EXPORT_SYMBOL_GPL(smscore_get_device_mode);
/**
 * find client by response id & type within the clients list.
 * return client handle or NULL.
 *
 * A client subscription with data_type == 0 matches any type for its id.
 *
 * @param coredev pointer to a coredev object returned by
 *                smscore_register_device
 * @param data_type client data type (SMS_DONT_CARE for all types)
 * @param id client id (SMS_DONT_CARE for all id)
 *
 */
static struct
smscore_client_t *smscore_find_client(struct smscore_device_t *coredev,
				      int data_type, int id)
{
	struct smscore_client_t *client = NULL;
	struct list_head *next, *first;
	unsigned long flags;
	struct list_head *firstid, *nextid;

	/* Walk clients and their id lists under the clients spinlock. */
	spin_lock_irqsave(&coredev->clientslock, flags);
	first = &coredev->clients;
	for (next = first->next;
	     (next != first) && !client;
	     next = next->next) {
		firstid = &((struct smscore_client_t *)next)->idlist;
		for (nextid = firstid->next;
		     nextid != firstid;
		     nextid = nextid->next) {
			if ((((struct smscore_idlist_t *)nextid)->id == id) &&
			    (((struct smscore_idlist_t *)nextid)->data_type == data_type ||
			    (((struct smscore_idlist_t *)nextid)->data_type == 0))) {
				client = (struct smscore_client_t *) next;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&coredev->clientslock, flags);
	return client;
}
/**
 * find client by response id/type, call clients onresponse handler
 * return buffer to pool on error
 *
 * Messages not consumed by a client are handled here: completion events
 * for the various request/response pairs, GPIO results and IR samples.
 * The buffer is returned to the pool whenever no client took ownership.
 *
 * @param coredev pointer to a coredev object returned by
 *                smscore_register_device
 * @param cb pointer to response buffer descriptor
 *
 */
void smscore_onresponse(struct smscore_device_t *coredev,
		struct smscore_buffer_t *cb) {
	struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) ((u8 *) cb->p
			+ cb->offset);
	struct smscore_client_t *client;
	int rc = -EBUSY;
	/* NOTE(review): these statics make the data-rate statistics global
	 * across all devices, not per-coredev. */
	static unsigned long last_sample_time; /* = 0; */
	static int data_total; /* = 0; */
	unsigned long time_now = jiffies_to_msecs(jiffies);

	if (!last_sample_time)
		last_sample_time = time_now;

	/* Log the aggregate data rate roughly every 10 seconds. */
	if (time_now - last_sample_time > 10000) {
		sms_debug("\ndata rate %d bytes/secs",
			  (int)((data_total * 1000) /
				(time_now - last_sample_time)));

		last_sample_time = time_now;
		data_total = 0;
	}

	data_total += cb->size;
	/* Do we need to re-route? */
	if ((phdr->msgType == MSG_SMS_HO_PER_SLICES_IND) ||
			(phdr->msgType == MSG_SMS_TRANSMISSION_IND)) {
		if (coredev->mode == DEVICE_MODE_DVBT_BDA)
			phdr->msgDstId = DVBT_BDA_CONTROL_MSG_ID;
	}


	client = smscore_find_client(coredev, phdr->msgType, phdr->msgDstId);

	/* If no client registered for type & id,
	 * check for control client where type is not registered */
	if (client)
		rc = client->onresponse_handler(client->context, cb);

	if (rc < 0) {
		switch (phdr->msgType) {
		case MSG_SMS_GET_VERSION_EX_RES:
		{
			struct SmsVersionRes_ST *ver =
				(struct SmsVersionRes_ST *) phdr;
			sms_debug("MSG_SMS_GET_VERSION_EX_RES "
				  "id %d prots 0x%x ver %d.%d",
				  ver->FirmwareId, ver->SupportedProtocols,
				  ver->RomVersionMajor, ver->RomVersionMinor);

			/* FirmwareId 255 means no firmware is running. */
			coredev->mode = ver->FirmwareId == 255 ?
				DEVICE_MODE_NONE : ver->FirmwareId;
			coredev->modes_supported = ver->SupportedProtocols;

			complete(&coredev->version_ex_done);
			break;
		}
		case MSG_SMS_INIT_DEVICE_RES:
			sms_debug("MSG_SMS_INIT_DEVICE_RES");
			complete(&coredev->init_device_done);
			break;
		case MSG_SW_RELOAD_START_RES:
			sms_debug("MSG_SW_RELOAD_START_RES");
			complete(&coredev->reload_start_done);
			break;
		case MSG_SMS_DATA_DOWNLOAD_RES:
			complete(&coredev->data_download_done);
			break;
		case MSG_SW_RELOAD_EXEC_RES:
			sms_debug("MSG_SW_RELOAD_EXEC_RES");
			break;
		case MSG_SMS_SWDOWNLOAD_TRIGGER_RES:
			sms_debug("MSG_SMS_SWDOWNLOAD_TRIGGER_RES");
			complete(&coredev->trigger_done);
			break;
		case MSG_SMS_SLEEP_RESUME_COMP_IND:
			complete(&coredev->resume_done);
			break;
		case MSG_SMS_GPIO_CONFIG_EX_RES:
			sms_debug("MSG_SMS_GPIO_CONFIG_EX_RES");
			complete(&coredev->gpio_configuration_done);
			break;
		case MSG_SMS_GPIO_SET_LEVEL_RES:
			sms_debug("MSG_SMS_GPIO_SET_LEVEL_RES");
			complete(&coredev->gpio_set_level_done);
			break;
		case MSG_SMS_GPIO_GET_LEVEL_RES:
		{
			u32 *msgdata = (u32 *) phdr;
			coredev->gpio_get_res = msgdata[1];
			sms_debug("MSG_SMS_GPIO_GET_LEVEL_RES gpio level %d",
				  coredev->gpio_get_res);
			complete(&coredev->gpio_get_level_done);
			break;
		}
		case MSG_SMS_START_IR_RES:
			complete(&coredev->ir_init_done);
			break;
		case MSG_SMS_IR_SAMPLES_IND:
			/* Forward raw IR samples (payload after the header)
			 * to the IR sub-module. */
			sms_ir_event(coredev,
				(const char *)
				((char *)phdr
				+ sizeof(struct SmsMsgHdr_ST)),
				(int)phdr->msgLength
				- sizeof(struct SmsMsgHdr_ST));
			break;

		default:
			break;
		}
		smscore_putbuffer(coredev, cb);
	}
}
EXPORT_SYMBOL_GPL(smscore_onresponse);
/**
* return pointer to next free buffer descriptor from core pool
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
*
* @return pointer to descriptor on success, NULL on error.
*/
struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
{
	unsigned long flags;
	struct smscore_buffer_t *buf = NULL;

	/* pop the first free descriptor, if any, under the pool lock */
	spin_lock_irqsave(&coredev->bufferslock, flags);
	if (!list_empty(&coredev->buffers)) {
		buf = (struct smscore_buffer_t *) coredev->buffers.next;
		list_del(&buf->entry);
	}
	spin_unlock_irqrestore(&coredev->bufferslock, flags);

	return buf;
}
struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
{
	struct smscore_buffer_t *entry = NULL;

	/* sleep until the pool yields a free descriptor */
	wait_event(coredev->buffer_mng_waitq, (entry = get_entry(coredev)));

	return entry;
}
EXPORT_SYMBOL_GPL(smscore_getbuffer);
/**
* return buffer descriptor to a pool
*
* @param coredev pointer to a coredev object returned by
* smscore_register_device
* @param cb pointer buffer descriptor
*
*/
void smscore_putbuffer(struct smscore_device_t *coredev,
		struct smscore_buffer_t *cb) {
	/* Return the descriptor to the pool *before* waking a waiter.
	 * The previous order (wake first, add afterwards) opened a race:
	 * a task in smscore_getbuffer() could be woken, re-check the still
	 * empty list via get_entry(), and go back to sleep - missing the
	 * wakeup for this buffer until the next putbuffer call. */
	list_add_locked(&cb->entry, &coredev->buffers, &coredev->bufferslock);
	wake_up_interruptible(&coredev->buffer_mng_waitq);
}
EXPORT_SYMBOL_GPL(smscore_putbuffer);
/* Ensure that messages carrying (data_type, id) are routed to 'client':
 * a no-op if the routing already exists, -EEXIST if another client owns
 * the pair, otherwise a new id-list entry is attached to the client. */
static int smscore_validate_client(struct smscore_device_t *coredev,
				   struct smscore_client_t *client,
				   int data_type, int id)
{
	struct smscore_idlist_t *listentry;
	struct smscore_client_t *registered_client;
	if (!client) {
		sms_err("bad parameter.");
		return -EFAULT;
	}
	/* already routed to this very client? nothing to do */
	registered_client = smscore_find_client(coredev, data_type, id);
	if (registered_client == client)
		return 0;
	if (registered_client) {
		sms_err("The msg ID already registered to another client.");
		return -EEXIST;
	}
	/* attach a fresh (data_type, id) routing entry to the client */
	listentry = kzalloc(sizeof(struct smscore_idlist_t), GFP_KERNEL);
	if (!listentry) {
		sms_err("Can't allocate memory for client id.");
		return -ENOMEM;
	}
	listentry->id = id;
	listentry->data_type = data_type;
	/* clientslock also guards per-client id lists */
	list_add_locked(&listentry->entry, &client->idlist,
			&coredev->clientslock);
	return 0;
}
/**
* creates smsclient object, check that id is taken by another client
*
* @param coredev pointer to a coredev object from clients hotplug
* @param initial_id all messages with this id would be sent to this client
* @param data_type all messages of this type would be sent to this client
* @param onresponse_handler client handler that is called to
* process incoming messages
* @param onremove_handler client handler that is called when device is removed
* @param context client-specific context
* @param client pointer to a value that receives created smsclient object
*
* @return 0 on success, <0 on error.
*/
int smscore_register_client(struct smscore_device_t *coredev,
			    struct smsclient_params_t *params,
			    struct smscore_client_t **client)
{
	struct smscore_client_t *newclient;
	unsigned long flags;
	int rc;
	/* check that no other channel with same parameters exists */
	if (smscore_find_client(coredev, params->data_type,
				params->initial_id)) {
		sms_err("Client already exist.");
		return -EEXIST;
	}
	newclient = kzalloc(sizeof(struct smscore_client_t), GFP_KERNEL);
	if (!newclient) {
		sms_err("Failed to allocate memory for client.");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&newclient->idlist);
	newclient->coredev = coredev;
	newclient->onresponse_handler = params->onresponse_handler;
	newclient->onremove_handler = params->onremove_handler;
	newclient->context = params->context;
	list_add_locked(&newclient->entry, &coredev->clients,
			&coredev->clientslock);
	/* Register the initial message id for this client. The result used
	 * to be ignored, so an -ENOMEM inside validate silently produced a
	 * "successfully" registered client that never received a single
	 * message. Undo the registration and propagate the error instead. */
	rc = smscore_validate_client(coredev, newclient, params->data_type,
				     params->initial_id);
	if (rc < 0) {
		spin_lock_irqsave(&coredev->clientslock, flags);
		list_del(&newclient->entry);
		spin_unlock_irqrestore(&coredev->clientslock, flags);
		kfree(newclient);
		return rc;
	}
	*client = newclient;
	sms_debug("%p %d %d", params->context, params->data_type,
		  params->initial_id);
	return 0;
}
EXPORT_SYMBOL_GPL(smscore_register_client);
/**
* frees smsclient object and all subclients associated with it
*
* @param client pointer to smsclient object returned by
* smscore_register_client
*
*/
void smscore_unregister_client(struct smscore_client_t *client)
{
	struct smscore_device_t *coredev = client->coredev;
	unsigned long flags;
	/* the whole teardown runs under clientslock so concurrent lookups in
	 * smscore_find_client() never see a half-destroyed client */
	spin_lock_irqsave(&coredev->clientslock, flags);
	/* free every (data_type, id) routing entry owned by this client */
	while (!list_empty(&client->idlist)) {
		struct smscore_idlist_t *identry =
			(struct smscore_idlist_t *) client->idlist.next;
		list_del(&identry->entry);
		kfree(identry);
	}
	sms_info("%p", client->context);
	list_del(&client->entry);
	kfree(client);
	spin_unlock_irqrestore(&coredev->clientslock, flags);
}
EXPORT_SYMBOL_GPL(smscore_unregister_client);
/**
* verifies that source id is not taken by another client,
* calls device handler to send requests to the device
*
* @param client pointer to smsclient object returned by
* smscore_register_client
* @param buffer pointer to a request buffer
* @param size size (in bytes) of request buffer
*
* @return 0 on success, <0 on error.
*/
int smsclient_sendrequest(struct smscore_client_t *client,
			  void *buffer, size_t size)
{
	struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) buffer;
	struct smscore_device_t *coredev;
	int rc;

	/* guard against callers handing us a dead or detached client */
	if (!client) {
		sms_err("Got NULL client");
		return -EINVAL;
	}

	coredev = client->coredev;
	if (!coredev) {
		sms_err("Got NULL coredev");
		return -EINVAL;
	}

	/* claim the source id for this client before transmitting */
	rc = smscore_validate_client(coredev, client, 0, phdr->msgSrcId);
	if (rc < 0)
		return rc;

	return coredev->sendrequest_handler(coredev->context, buffer, size);
}
EXPORT_SYMBOL_GPL(smsclient_sendrequest);
/* old GPIO managements implementation */
/* old GPIO managements implementation */
int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
			   struct smscore_config_gpio *pinconfig)
{
	/* request layout: message header plus six configuration words */
	struct {
		struct SmsMsgHdr_ST hdr;
		u32 data[6];
	} msg;
	if (coredev->device_flags & SMS_DEVICE_FAMILY2) {
		msg.hdr.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
		msg.hdr.msgDstId = HIF_TASK;
		msg.hdr.msgFlags = 0;
		msg.hdr.msgType  = MSG_SMS_GPIO_CONFIG_EX_REQ;
		msg.hdr.msgLength = sizeof(msg);
		msg.data[0] = pin;
		msg.data[1] = pinconfig->pullupdown;
		/* Convert slew rate for Nova: Fast(0) = 3 / Slow(1) = 0; */
		msg.data[2] = pinconfig->outputslewrate == 0 ? 3 : 0;
		/* map the generic driving-strength enum onto Nova values */
		switch (pinconfig->outputdriving) {
		case SMS_GPIO_OUTPUTDRIVING_16mA:
			msg.data[3] = 7; /* Nova - 16mA */
			break;
		case SMS_GPIO_OUTPUTDRIVING_12mA:
			msg.data[3] = 5; /* Nova - 11mA */
			break;
		case SMS_GPIO_OUTPUTDRIVING_8mA:
			msg.data[3] = 3; /* Nova - 7mA */
			break;
		case SMS_GPIO_OUTPUTDRIVING_4mA:
		default:
			msg.data[3] = 2; /* Nova - 4mA */
			break;
		}
		msg.data[4] = pinconfig->direction;
		msg.data[5] = 0;
	} else /* TODO: SMS_DEVICE_FAMILY1 */
		return -EINVAL;
	return coredev->sendrequest_handler(coredev->context,
					    &msg, sizeof(msg));
}
int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level)
{
	/* old-style GPIO level request: header plus three payload words -
	 * pin number, level (0/1) and a reserved zero word */
	struct {
		struct SmsMsgHdr_ST hdr;
		u32 data[3];
	} msg;

	if (pin > MAX_GPIO_PIN_NUMBER)
		return -EINVAL;

	msg.data[0] = pin;
	msg.data[1] = !!level;
	msg.data[2] = 0;
	msg.hdr.msgSrcId  = DVBT_BDA_CONTROL_MSG_ID;
	msg.hdr.msgDstId  = HIF_TASK;
	msg.hdr.msgFlags  = 0;
	msg.hdr.msgType   = MSG_SMS_GPIO_SET_LEVEL_REQ;
	msg.hdr.msgLength = sizeof(msg);

	return coredev->sendrequest_handler(coredev->context,
					    &msg, sizeof(msg));
}
/* new GPIO management implementation */
/* new GPIO management implementation */
static int GetGpioPinParams(u32 PinNum, u32 *pTranslatedPinNum,
		u32 *pGroupNum, u32 *pGroupCfg) {
	/* Pin-range translation table: each row maps an inclusive range of
	 * raw pin numbers to its group number and group configuration; the
	 * translated pin is always the first pin of the range. */
	static const struct {
		u32 first;
		u32 last;
		u32 group;
		u32 cfg;
	} map[] = {
		{  0,  1,  9, 2 },
		{  2,  6,  0, 2 },
		{  7, 11,  1, 1 },
		{ 12, 15,  2, 3 },
		{ 16, 16, 23, 1 },
		{ 17, 24,  3, 1 },
		{ 25, 25,  6, 1 },
		{ 26, 28,  4, 1 },
		{ 29, 29,  5, 2 },
		{ 30, 30,  8, 1 },
		{ 31, 31, 17, 1 },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(map); i++) {
		if (PinNum >= map[i].first && PinNum <= map[i].last) {
			*pTranslatedPinNum = map[i].first;
			*pGroupNum = map[i].group;
			/* group configuration lives in bits 24..31 */
			*pGroupCfg = map[i].cfg << 24;
			return 0;
		}
	}

	/* unknown pin: keep the original's default cfg write on failure */
	*pGroupCfg = 1;
	return -1;
}
/* Configure a GPIO pin: builds either the family-1 (translated pin/group)
 * or family-2 (EX) request and waits for the firmware's completion. */
int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
		struct smscore_gpio_config *pGpioConfig) {
	u32 totalLen;
	u32 TranslatedPinNum = 0;
	u32 GroupNum = 0;
	u32 ElectricChar;
	u32 groupCfg;
	void *buffer;
	int rc;
	struct SetGpioMsg {
		struct SmsMsgHdr_ST xMsgHeader;
		u32 msgData[6];
	} *pMsg;
	if (PinNum > MAX_GPIO_PIN_NUMBER)
		return -EINVAL;
	if (pGpioConfig == NULL)
		return -EINVAL;
	totalLen = sizeof(struct SmsMsgHdr_ST) + (sizeof(u32) * 6);
	/* over-allocate so the message can be aligned for DMA below */
	buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
			 GFP_KERNEL | GFP_DMA);
	if (!buffer)
		return -ENOMEM;
	pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
	pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
	pMsg->xMsgHeader.msgDstId = HIF_TASK;
	pMsg->xMsgHeader.msgFlags = 0;
	pMsg->xMsgHeader.msgLength = (u16) totalLen;
	pMsg->msgData[0] = PinNum;
	if (!(coredev->device_flags & SMS_DEVICE_FAMILY2)) {
		/* family 1: firmware wants the translated pin/group tuple */
		pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_CONFIG_REQ;
		if (GetGpioPinParams(PinNum, &TranslatedPinNum, &GroupNum,
				     &groupCfg) != 0) {
			rc = -EINVAL;
			goto free;
		}
		pMsg->msgData[1] = TranslatedPinNum;
		pMsg->msgData[2] = GroupNum;
		/* pack the electrical properties into one bit field */
		ElectricChar = (pGpioConfig->PullUpDown)
			| (pGpioConfig->InputCharacteristics << 2)
			| (pGpioConfig->OutputSlewRate << 3)
			| (pGpioConfig->OutputDriving << 4);
		pMsg->msgData[3] = ElectricChar;
		pMsg->msgData[4] = pGpioConfig->Direction;
		pMsg->msgData[5] = groupCfg;
	} else {
		/* family 2: pass the raw configuration fields through */
		pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_CONFIG_EX_REQ;
		pMsg->msgData[1] = pGpioConfig->PullUpDown;
		pMsg->msgData[2] = pGpioConfig->OutputSlewRate;
		pMsg->msgData[3] = pGpioConfig->OutputDriving;
		pMsg->msgData[4] = pGpioConfig->Direction;
		pMsg->msgData[5] = 0;
	}
	smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
	/* blocks until MSG_SMS_GPIO_CONFIG_EX_RES completes this waiter */
	rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
					  &coredev->gpio_configuration_done);
	if (rc != 0) {
		if (rc == -ETIME)
			sms_err("smscore_gpio_configure timeout");
		else
			sms_err("smscore_gpio_configure error");
	}
free:
	kfree(buffer);
	return rc;
}
/* Drive a GPIO pin to NewLevel (0/1) and wait for the firmware's ack. */
int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
		u8 NewLevel) {
	u32 totalLen;
	int rc;
	void *buffer;
	struct SetGpioMsg {
		struct SmsMsgHdr_ST xMsgHeader;
		u32 msgData[3]; /* keep it 3 ! */
	} *pMsg;
	if ((NewLevel > 1) || (PinNum > MAX_GPIO_PIN_NUMBER))
		return -EINVAL;
	totalLen = sizeof(struct SmsMsgHdr_ST) +
		(3 * sizeof(u32)); /* keep it 3 ! */
	/* over-allocate so the message can be aligned for DMA below */
	buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
			 GFP_KERNEL | GFP_DMA);
	if (!buffer)
		return -ENOMEM;
	pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
	pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
	pMsg->xMsgHeader.msgDstId = HIF_TASK;
	pMsg->xMsgHeader.msgFlags = 0;
	pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_SET_LEVEL_REQ;
	pMsg->xMsgHeader.msgLength = (u16) totalLen;
	pMsg->msgData[0] = PinNum;
	pMsg->msgData[1] = NewLevel;
	/* NOTE(review): msgData[2] is never initialized, so uninitialized
	 * heap bytes are sent to the firmware - presumably it ignores that
	 * word, but confirm (compare smscore_set_gpio, which zeroes it). */
	/* Send message to SMS */
	smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
	rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
					  &coredev->gpio_set_level_done);
	if (rc != 0) {
		if (rc == -ETIME)
			sms_err("smscore_gpio_set_level timeout");
		else
			sms_err("smscore_gpio_set_level error");
	}
	kfree(buffer);
	return rc;
}
/* Read the level of a GPIO pin into *level; the value itself arrives via
 * the MSG_SMS_GPIO_GET_LEVEL_RES handler which stores it in
 * coredev->gpio_get_res before completing gpio_get_level_done. */
int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 PinNum,
		u8 *level) {
	u32 totalLen;
	int rc;
	void *buffer;
	struct SetGpioMsg {
		struct SmsMsgHdr_ST xMsgHeader;
		u32 msgData[2];
	} *pMsg;
	if (PinNum > MAX_GPIO_PIN_NUMBER)
		return -EINVAL;
	totalLen = sizeof(struct SmsMsgHdr_ST) + (2 * sizeof(u32));
	/* over-allocate so the message can be aligned for DMA below */
	buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
			 GFP_KERNEL | GFP_DMA);
	if (!buffer)
		return -ENOMEM;
	pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
	pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
	pMsg->xMsgHeader.msgDstId = HIF_TASK;
	pMsg->xMsgHeader.msgFlags = 0;
	pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_GET_LEVEL_REQ;
	pMsg->xMsgHeader.msgLength = (u16) totalLen;
	pMsg->msgData[0] = PinNum;
	pMsg->msgData[1] = 0;
	/* Send message to SMS */
	smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
	rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
					  &coredev->gpio_get_level_done);
	if (rc != 0) {
		if (rc == -ETIME)
			sms_err("smscore_gpio_get_level timeout");
		else
			sms_err("smscore_gpio_get_level error");
	}
	kfree(buffer);
	/* Its a race between other gpio_get_level() and the copy of the single
	 * global 'coredev->gpio_get_res' to the function's variable 'level'
	 */
	/* NOTE(review): *level is written even when rc != 0, i.e. callers may
	 * read a stale value from a previous request on error paths. */
	*level = coredev->gpio_get_res;
	return rc;
}
static int __init smscore_module_init(void)
{
	/* set up the driver-global lists and their locks; nothing here
	 * can fail, so registration always succeeds */
	INIT_LIST_HEAD(&g_smscore_notifyees);
	INIT_LIST_HEAD(&g_smscore_devices);
	kmutex_init(&g_smscore_deviceslock);

	INIT_LIST_HEAD(&g_smscore_registry);
	kmutex_init(&g_smscore_registrylock);

	return 0;
}
static void __exit smscore_module_exit(void)
{
	/* free all registered hotplug notifyees */
	kmutex_lock(&g_smscore_deviceslock);
	while (!list_empty(&g_smscore_notifyees)) {
		struct smscore_device_notifyee_t *notifyee =
			(struct smscore_device_notifyee_t *)
			g_smscore_notifyees.next;
		list_del(&notifyee->entry);
		kfree(notifyee);
	}
	kmutex_unlock(&g_smscore_deviceslock);
	/* free the device registry entries */
	kmutex_lock(&g_smscore_registrylock);
	while (!list_empty(&g_smscore_registry)) {
		struct smscore_registry_entry_t *entry =
			(struct smscore_registry_entry_t *)
			g_smscore_registry.next;
		list_del(&entry->entry);
		kfree(entry);
	}
	kmutex_unlock(&g_smscore_registrylock);
	sms_debug("");
}
/* module entry/exit points and metadata */
module_init(smscore_module_init);
module_exit(smscore_module_exit);
MODULE_DESCRIPTION("Siano MDTV Core module");
MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
eckucukoglu/sober-kernel | drivers/staging/line6/variax.c | 2675 | 5964 | /*
* Line6 Linux USB driver - 0.9.1beta
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/slab.h>
#include "audio.h"
#include "driver.h"
#include "variax.h"
#define VARIAX_OFFSET_ACTIVATE 7
/*
This message is sent by the device during initialization and identifies
the connected guitar version.
*/
static const char variax_init_version[] = {
0xf0, 0x7e, 0x7f, 0x06, 0x02, 0x00, 0x01, 0x0c,
0x07, 0x00, 0x00, 0x00
};
/*
This message is the last one sent by the device during initialization.
*/
static const char variax_init_done[] = {
0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x6b
};
/*
	Activation message template; byte VARIAX_OFFSET_ACTIVATE is patched
	with the on/off flag by variax_activate_async() before sending.
*/
static const char variax_activate[] = {
	0xf0, 0x00, 0x01, 0x0c, 0x07, 0x00, 0x2a, 0x01,
	0xf7
};
/* forward declarations: */
static void variax_startup2(unsigned long data);
static void variax_startup4(unsigned long data);
static void variax_startup5(unsigned long data);
/* Asynchronously send the activate message with flag 'a' patched in. */
static void variax_activate_async(struct usb_line6_variax *variax, int a)
{
	variax->buffer_activate[VARIAX_OFFSET_ACTIVATE] = a;
	line6_send_raw_message_async(&variax->line6, variax->buffer_activate,
				     sizeof(variax_activate));
}
/*
Variax startup procedure.
This is a sequence of functions with special requirements (e.g., must
not run immediately after initialization, must not run in interrupt
context). After the last one has finished, the device is ready to use.
*/
/* Stage 1: enter the startup state machine and defer stage 2 by timer. */
static void variax_startup1(struct usb_line6_variax *variax)
{
	CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_INIT);
	/* delay startup procedure: */
	line6_start_timer(&variax->startup_timer1, VARIAX_STARTUP_DELAY1,
			  variax_startup2, (unsigned long)variax);
}
/* Stage 2 (timer context): keep requesting the firmware version until the
 * device answers; the reply advances the state machine via
 * line6_variax_process_message() -> variax_startup3(). */
static void variax_startup2(unsigned long data)
{
	struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
	struct usb_line6 *line6 = &variax->line6;
	/* schedule another startup procedure until startup is complete: */
	if (variax->startup_progress >= VARIAX_STARTUP_LAST)
		return;
	variax->startup_progress = VARIAX_STARTUP_VERSIONREQ;
	line6_start_timer(&variax->startup_timer1, VARIAX_STARTUP_DELAY1,
			  variax_startup2, (unsigned long)variax);
	/* request firmware version: */
	line6_version_request_async(line6);
}
/* Stage 3: version received; wait a bit, then activate (stage 4). */
static void variax_startup3(struct usb_line6_variax *variax)
{
	CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_WAIT);
	/* delay startup procedure: */
	line6_start_timer(&variax->startup_timer2, VARIAX_STARTUP_DELAY3,
			  variax_startup4, (unsigned long)variax);
}
/* Stage 4 (timer context): send the activate message and defer stage 5. */
static void variax_startup4(unsigned long data)
{
	struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
	CHECK_STARTUP_PROGRESS(variax->startup_progress,
			       VARIAX_STARTUP_ACTIVATE);
	/* activate device: */
	variax_activate_async(variax, 1);
	line6_start_timer(&variax->startup_timer2, VARIAX_STARTUP_DELAY4,
			  variax_startup5, (unsigned long)variax);
}
/* Stage 5 (timer context): hand the final setup off to process context,
 * since stage 6 registers ALSA audio which may sleep. */
static void variax_startup5(unsigned long data)
{
	struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
	CHECK_STARTUP_PROGRESS(variax->startup_progress,
			       VARIAX_STARTUP_WORKQUEUE);
	/* schedule work for global work queue: */
	schedule_work(&variax->startup_work);
}
/* Stage 6 (workqueue context): final step - register the ALSA card. */
static void variax_startup6(struct work_struct *work)
{
	struct usb_line6_variax *variax =
		container_of(work, struct usb_line6_variax, startup_work);
	CHECK_STARTUP_PROGRESS(variax->startup_progress, VARIAX_STARTUP_SETUP);
	/* ALSA audio interface: */
	line6_register_audio(&variax->line6);
}
/*
Process a completely received message.
*/
void line6_variax_process_message(struct usb_line6_variax *variax)
{
	const unsigned char *buf = variax->line6.buffer_message;
	switch (buf[0]) {
	case LINE6_RESET:
		dev_info(variax->line6.ifcdev, "VARIAX reset\n");
		break;
	case LINE6_SYSEX_BEGIN:
		/* match (excluding the sysex start byte) against the two
		 * known init messages and advance the startup machine */
		if (memcmp(buf + 1, variax_init_version + 1,
			   sizeof(variax_init_version) - 1) == 0) {
			variax_startup3(variax);
		} else if (memcmp(buf + 1, variax_init_done + 1,
				  sizeof(variax_init_done) - 1) == 0) {
			/* notify of complete initialization: */
			variax_startup4((unsigned long)variax);
		}
		break;
	}
}
/*
Variax destructor.
*/
static void variax_destruct(struct usb_interface *interface)
{
	struct usb_line6_variax *variax = usb_get_intfdata(interface);
	if (variax == NULL)
		return;
	line6_cleanup_audio(&variax->line6);
	/* stop any pending startup stages before freeing the buffers they
	 * would touch */
	del_timer(&variax->startup_timer1);
	del_timer(&variax->startup_timer2);
	cancel_work_sync(&variax->startup_work);
	kfree(variax->buffer_activate);
}
/*
Try to init workbench device.
*/
/*
	Try to init workbench device.
	On failure the caller (line6_variax_init) runs variax_destruct() to
	undo any partial initialization.
*/
static int variax_try_init(struct usb_interface *interface,
			   struct usb_line6_variax *variax)
{
	int err;

	/* Validate the arguments *before* touching them: the previous code
	 * ran init_timer()/INIT_WORK() through 'variax' first and only then
	 * checked it for NULL, dereferencing a possibly NULL pointer. */
	if ((interface == NULL) || (variax == NULL))
		return -ENODEV;

	init_timer(&variax->startup_timer1);
	init_timer(&variax->startup_timer2);
	INIT_WORK(&variax->startup_work, variax_startup6);

	/* initialize USB buffers: */
	variax->buffer_activate = kmemdup(variax_activate,
					  sizeof(variax_activate), GFP_KERNEL);
	if (variax->buffer_activate == NULL) {
		dev_err(&interface->dev, "Out of memory\n");
		return -ENOMEM;
	}

	/* initialize audio system: */
	err = line6_init_audio(&variax->line6);
	if (err < 0)
		return err;

	/* initialize MIDI subsystem: */
	err = line6_init_midi(&variax->line6);
	if (err < 0)
		return err;

	/* initiate startup procedure: */
	variax_startup1(variax);
	return 0;
}
/*
Init workbench device (and clean up in case of failure).
*/
int line6_variax_init(struct usb_interface *interface,
		      struct usb_line6_variax *variax)
{
	int ret = variax_try_init(interface, variax);

	/* undo any partial initialization on failure */
	if (ret < 0)
		variax_destruct(interface);

	return ret;
}
/*
Workbench device disconnected.
*/
void line6_variax_disconnect(struct usb_interface *interface)
{
	/* nothing to tear down without an interface */
	if (!interface)
		return;

	variax_destruct(interface);
}
| gpl-2.0 |
omnirom/android_kernel_moto_shamu | drivers/isdn/mISDN/l1oip_core.c | 2675 | 40554 | /*
* l1oip.c low level driver for tunneling layer 1 over IP
*
* NOTE: It is not compatible with TDMoIP nor "ISDN over IP".
*
* Author Andreas Eversberg (jolly@eversberg.eu)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
/* module parameters:
* type:
Value 1 = BRI
Value 2 = PRI
Value 3 = BRI (multi channel frame, not supported yet)
Value 4 = PRI (multi channel frame, not supported yet)
A multi channel frame reduces overhead to a single frame for all
b-channels, but increases delay.
(NOTE: Multi channel frames are not implemented yet.)
* codec:
Value 0 = transparent (default)
Value 1 = transfer ALAW
Value 2 = transfer ULAW
Value 3 = transfer generic 4 bit compression.
* ulaw:
0 = we use a-Law (default)
1 = we use u-Law
* limit:
limitation of B-channels to control bandwidth (1...126)
BRI: 1 or 2
PRI: 1-30, 31-126 (126, because dchannel ist not counted here)
Also limited ressources are used for stack, resulting in less channels.
It is possible to have more channels than 30 in PRI mode, this must
be supported by the application.
* ip:
byte representation of remote ip address (127.0.0.1 -> 127,0,0,1)
If not given or four 0, no remote address is set.
For multiple interfaces, concat ip addresses. (127,0,0,1,127,0,0,1)
* port:
port number (local interface)
If not given or 0, port 931 is used for fist instance, 932 for next...
For multiple interfaces, different ports must be given.
* remoteport:
port number (remote interface)
If not given or 0, remote port equals local port
For multiple interfaces on equal sites, different ports must be given.
* ondemand:
0 = fixed (always transmit packets, even when remote side timed out)
1 = on demand (only transmit packets, when remote side is detected)
the default is 0
NOTE: ID must also be set for on demand.
* id:
optional value to identify frames. This value must be equal on both
peers and should be random. If omitted or 0, no ID is transmitted.
* debug:
NOTE: only one debug value must be given for all cards
enable debugging (see l1oip.h for debug options)
Special mISDN controls:
op = MISDN_CTRL_SETPEER*
p1 = bytes 0-3 : remote IP address in network order (left element first)
p2 = bytes 1-2 : remote port in network order (high byte first)
optional:
p2 = bytes 3-4 : local port in network order (high byte first)
op = MISDN_CTRL_UNSETPEER*
* Use l1oipctrl for comfortable setting or removing ip address.
(Layer 1 Over IP CTRL)
L1oIP-Protocol
--------------
Frame Header:
7 6 5 4 3 2 1 0
+---------------+
|Ver|T|I|Coding |
+---------------+
| ID byte 3 * |
+---------------+
| ID byte 2 * |
+---------------+
| ID byte 1 * |
+---------------+
| ID byte 0 * |
+---------------+
|M| Channel |
+---------------+
| Length * |
+---------------+
| Time Base MSB |
+---------------+
| Time Base LSB |
+---------------+
| Data.... |
...
| |
+---------------+
|M| Channel |
+---------------+
| Length * |
+---------------+
| Time Base MSB |
+---------------+
| Time Base LSB |
+---------------+
| Data.... |
...
* Only included in some cases.
- Ver = Version
If version is missmatch, the frame must be ignored.
- T = Type of interface
Must be 0 for S0 or 1 for E1.
- I = Id present
If bit is set, four ID bytes are included in frame.
- ID = Connection ID
Additional ID to prevent Denial of Service attacs. Also it prevents hijacking
connections with dynamic IP. The ID should be random and must not be 0.
- Coding = Type of codec
Must be 0 for no transcoding. Also for D-channel and other HDLC frames.
1 and 2 are reserved for explicitly use of a-LAW or u-LAW codec.
3 is used for generic table compressor.
- M = More channels to come. If this flag is 1, the following byte contains
the length of the channel data. After the data block, the next channel will
be defined. The flag for the last channel block (or if only one channel is
transmitted), must be 0 and no length is given.
- Channel = Channel number
0 reserved
1-3 channel data for S0 (3 is D-channel)
1-31 channel data for E1 (16 is D-channel)
32-127 channel data for extended E1 (16 is D-channel)
- The length is used if the M-flag is 1. It is used to find the next channel
inside frame.
NOTE: A value of 0 equals 256 bytes of data.
-> For larger data blocks, a single frame must be used.
-> For larger streams, a single frame or multiple blocks with same channel ID
must be used.
- Time Base = Timestamp of first sample in frame
The "Time Base" is used to rearange packets and to detect packet loss.
The 16 bits are sent in network order (MSB first) and count 1/8000 th of a
second. This causes a wrap around each 8,192 seconds. There is no requirement
for the initial "Time Base", but 0 should be used for the first packet.
In case of HDLC data, this timestamp counts the packet or byte number.
Two Timers:
After initialisation, a timer of 15 seconds is started. Whenever a packet is
transmitted, the timer is reset to 15 seconds again. If the timer expires, an
empty packet is transmitted. This keep the connection alive.
When a valid packet is received, a timer 65 seconds is started. The interface
become ACTIVE. If the timer expires, the interface becomes INACTIVE.
Dynamic IP handling:
To allow dynamic IP, the ID must be non 0. In this case, any packet with the
correct port number and ID will be accepted. If the remote side changes its IP
the new IP is used for all transmitted packets until it changes again.
On Demand:
If the ondemand parameter is given, the remote IP is set to 0 on timeout.
This will stop keepalive traffic to remote. If the remote is online again,
traffic will continue to the remote address. This is useful for road warriors.
This feature only works with ID set, otherwhise it is highly unsecure.
Socket and Thread
-----------------
The complete socket opening and closing is done by a thread.
When the thread opened a socket, the hc->socket descriptor is set. Whenever a
packet shall be sent to the socket, the hc->socket must be checked wheter not
NULL. To prevent change in socket descriptor, the hc->socket_lock must be used.
To change the socket, a recall of l1oip_socket_open() will safely kill the
socket process and create a new one.
*/
#define L1OIP_VERSION 0 /* 0...3 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mISDNif.h>
#include <linux/mISDNhw.h>
#include <linux/mISDNdsp.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include "core.h"
#include "l1oip.h"
static const char *l1oip_revision = "2.00";
/* driver-global state: instance counter, lock and list of interfaces */
static int l1oip_cnt;
static spinlock_t l1oip_lock;
static struct list_head l1oip_ilist;
#define MAX_CARDS	16
/* per-card module parameters; their semantics are documented in the
 * "module parameters" section of the header comment of this file */
static u_int type[MAX_CARDS];
static u_int codec[MAX_CARDS];
static u_int ip[MAX_CARDS * 4];
static u_int port[MAX_CARDS];
static u_int remoteport[MAX_CARDS];
static u_int ondemand[MAX_CARDS];
static u_int limit[MAX_CARDS];
static u_int id[MAX_CARDS];
static int debug;
static int ulaw;
MODULE_AUTHOR("Andreas Eversberg");
MODULE_LICENSE("GPL");
/* expose the parameters as writable module options */
module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(ip, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(remoteport, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(ondemand, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(limit, uint, NULL, S_IRUGO | S_IWUSR);
module_param_array(id, uint, NULL, S_IRUGO | S_IWUSR);
module_param(ulaw, uint, S_IRUGO | S_IWUSR);
module_param(debug, uint, S_IRUGO | S_IWUSR);
/*
* send a frame via socket, if open and restart timer
*/
static int
l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
		  u16 timebase, u8 *buf, int len)
{
	u8 *p;
	/* NOTE(review): variable-length array on the kernel stack sized by
	 * the caller's 'len' - a large frame risks stack overflow; confirm
	 * the maximum 'len' callers can pass, or use a bounded buffer. */
	u8 frame[len + 32];
	struct socket *socket = NULL;
	if (debug & DEBUG_L1OIP_MSG)
		printk(KERN_DEBUG "%s: sending data to socket (len = %d)\n",
		       __func__, len);
	p = frame;
	/* restart timer */
	/* any transmission resets the keepalive timer (see file header:
	 * an empty packet is sent when it expires) */
	if ((int)(hc->keep_tl.expires-jiffies) < 5 * HZ) {
		del_timer(&hc->keep_tl);
		hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
		add_timer(&hc->keep_tl);
	} else
		hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
	if (debug & DEBUG_L1OIP_MSG)
		printk(KERN_DEBUG "%s: resetting timer\n", __func__);
	/* drop if we have no remote ip or port */
	if (!hc->sin_remote.sin_addr.s_addr || !hc->sin_remote.sin_port) {
		if (debug & DEBUG_L1OIP_MSG)
			printk(KERN_DEBUG "%s: dropping frame, because remote "
			       "IP is not set.\n", __func__);
		return len;
	}
	/* assemble frame */
	/* header layout is described in the "L1oIP-Protocol" section of
	 * the file header comment */
	*p++ = (L1OIP_VERSION << 6) /* version and coding */
		| (hc->pri ? 0x20 : 0x00) /* type */
		| (hc->id ? 0x10 : 0x00) /* id */
		| localcodec;
	if (hc->id) {
		*p++ = hc->id >> 24; /* id */
		*p++ = hc->id >> 16;
		*p++ = hc->id >> 8;
		*p++ = hc->id;
	}
	*p++ = 0x00 + channel; /* m-flag, channel */
	*p++ = timebase >> 8; /* time base */
	*p++ = timebase;
	if (buf && len) { /* add data to frame */
		/* transcode payload according to the negotiated codec */
		if (localcodec == 1 && ulaw)
			l1oip_ulaw_to_alaw(buf, len, p);
		else if (localcodec == 2 && !ulaw)
			l1oip_alaw_to_ulaw(buf, len, p);
		else if (localcodec == 3)
			len = l1oip_law_to_4bit(buf, len, p,
						&hc->chan[channel].codecstate);
		else
			memcpy(p, buf, len);
	}
	len += p - frame;
	/* check for socket in safe condition */
	spin_lock(&hc->socket_lock);
	if (!hc->socket) {
		spin_unlock(&hc->socket_lock);
		return 0;
	}
	/* seize socket */
	/* temporarily take the socket out of hc so the socket thread cannot
	 * close it while we transmit */
	socket = hc->socket;
	hc->socket = NULL;
	spin_unlock(&hc->socket_lock);
	/* send packet */
	if (debug & DEBUG_L1OIP_MSG)
		printk(KERN_DEBUG "%s: sending packet to socket (len "
		       "= %d)\n", __func__, len);
	hc->sendiov.iov_base = frame;
	hc->sendiov.iov_len  = len;
	len = kernel_sendmsg(socket, &hc->sendmsg, &hc->sendiov, 1, len);
	/* give socket back */
	hc->socket = socket; /* no locking required */
	return len;
}
/*
* receive channel data from socket
*/
/*
 * Deliver one decoded channel payload to the mISDN stack.
 *
 * @hc:          interface state
 * @remotecodec: codec id from the frame header (0=as-is 1=alaw 2=ulaw 3=4bit)
 * @channel:     channel slot (1..127) the payload belongs to
 * @timebase:    16 bit sequence/time value from the frame
 * @buf/@len:    raw payload as received from the socket
 *
 * The payload is transcoded into the local law if needed, then handed
 * to the D- or B-channel registered in hc->chan[channel].
 */
static void
l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase,
		  u8 *buf, int len)
{
	struct sk_buff *nskb;
	struct bchannel *bch;
	struct dchannel *dch;
	u8 *p;
	u32 rx_counter;

	/* a zero-length payload is just a keepalive, nothing to deliver */
	if (len == 0) {
		if (debug & DEBUG_L1OIP_MSG)
			printk(KERN_DEBUG "%s: received empty keepalive data, "
			       "ignoring\n", __func__);
		return;
	}

	if (debug & DEBUG_L1OIP_MSG)
		printk(KERN_DEBUG "%s: received data, sending to mISDN (%d)\n",
		       __func__, len);

	/* only 1..127 fit in the 7 bit channel field; 0 is invalid */
	if (channel < 1 || channel > 127) {
		printk(KERN_WARNING "%s: packet error - channel %d out of "
		       "range\n", __func__, channel);
		return;
	}
	dch = hc->chan[channel].dch;
	bch = hc->chan[channel].bch;
	if (!dch && !bch) {
		printk(KERN_WARNING "%s: packet error - channel %d not in "
		       "stack\n", __func__, channel);
		return;
	}

	/* prepare message; the 4bit codec expands to two law bytes each */
	nskb = mI_alloc_skb((remotecodec == 3) ? (len << 1) : len, GFP_ATOMIC);
	if (!nskb) {
		printk(KERN_ERR "%s: No mem for skb.\n", __func__);
		return;
	}
	p = skb_put(nskb, (remotecodec == 3) ? (len << 1) : len);

	/* transcode the remote law into the locally configured law */
	if (remotecodec == 1 && ulaw)
		l1oip_ulaw_to_alaw(buf, len, p);
	else if (remotecodec == 2 && !ulaw)
		l1oip_alaw_to_ulaw(buf, len, p);
	else if (remotecodec == 3)
		len = l1oip_4bit_to_law(buf, len, p);
	else
		memcpy(p, buf, len);

	/* send message up */
	if (dch && len >= 2) {
		dch->rx_skb = nskb;
		recv_Dchannel(dch);
	}
	if (bch) {
		/* expand 16 bit sequence number to 32 bit sequence number */
		rx_counter = hc->chan[channel].rx_counter;
		if (((s16)(timebase - rx_counter)) >= 0) {
			/* time has changed forward */
			if (timebase >= (rx_counter & 0xffff))
				rx_counter =
					(rx_counter & 0xffff0000) | timebase;
			else
				rx_counter = ((rx_counter & 0xffff0000) + 0x10000)
					| timebase;
		} else {
			/* time has changed backwards */
			if (timebase < (rx_counter & 0xffff))
				rx_counter =
					(rx_counter & 0xffff0000) | timebase;
			else
				rx_counter = ((rx_counter & 0xffff0000) - 0x10000)
					| timebase;
		}
		hc->chan[channel].rx_counter = rx_counter;

#ifdef REORDER_DEBUG
		/* debug aid: deliberately swap every other frame with the
		 * previously held one to exercise out-of-order handling */
		if (hc->chan[channel].disorder_flag) {
			struct sk_buff *skb;
			int cnt;
			skb = hc->chan[channel].disorder_skb;
			hc->chan[channel].disorder_skb = nskb;
			nskb = skb;
			cnt = hc->chan[channel].disorder_cnt;
			hc->chan[channel].disorder_cnt = rx_counter;
			rx_counter = cnt;
		}
		hc->chan[channel].disorder_flag ^= 1;
		if (nskb)
#endif
			queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb);
	}
}
/*
* parse frame and extract channel data
*/
/*
 * Parse one received UDP frame and hand each contained channel payload
 * to l1oip_socket_recv().
 *
 * Wire layout (after the header byte and the optional 32 bit ID):
 *   byte 0: version(2 bits) | type(1) | id-flag(1) | codec(4)
 *   per (multi)frame: m-flag+channel(1) [mlen(1) if m] timebase(2) data...
 *
 * Any malformed field aborts parsing of the whole packet with a warning.
 */
static void
l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len)
{
	u32 packet_id;
	u8 channel;
	u8 remotecodec;
	u16 timebase;
	int m, mlen;
	int len_start = len; /* initial frame length */
	struct dchannel *dch = hc->chan[hc->d_idx].dch;

	if (debug & DEBUG_L1OIP_MSG)
		printk(KERN_DEBUG "%s: received frame, parsing... (%d)\n",
		       __func__, len);

	/* check length: header byte + channel byte + 16 bit time base */
	if (len < 1 + 1 + 2) {
		printk(KERN_WARNING "%s: packet error - length %d below "
		       "4 bytes\n", __func__, len);
		return;
	}

	/* check version (top two bits of the first byte) */
	if (((*buf) >> 6) != L1OIP_VERSION) {
		printk(KERN_WARNING "%s: packet error - unknown version %d\n",
		       __func__, buf[0]>>6);
		return;
	}

	/* check type: the type bit must match our interface (E1 vs. S0) */
	if (((*buf) & 0x20) && !hc->pri) {
		printk(KERN_WARNING "%s: packet error - received E1 packet "
		       "on S0 interface\n", __func__);
		return;
	}
	if (!((*buf) & 0x20) && hc->pri) {
		printk(KERN_WARNING "%s: packet error - received S0 packet "
		       "on E1 interface\n", __func__);
		return;
	}

	/* get id flag */
	packet_id = (*buf >> 4) & 1;

	/* check coding (low nibble selects the codec) */
	remotecodec = (*buf) & 0x0f;
	if (remotecodec > 3) {
		printk(KERN_WARNING "%s: packet error - remotecodec %d "
		       "unsupported\n", __func__, remotecodec);
		return;
	}
	buf++;
	len--;

	/* check packet_id: both ends must agree on using an ID, and on it */
	if (packet_id) {
		if (!hc->id) {
			printk(KERN_WARNING "%s: packet error - packet has id "
			       "0x%x, but we have not\n", __func__, packet_id);
			return;
		}
		if (len < 4) {
			printk(KERN_WARNING "%s: packet error - packet too "
			       "short for ID value\n", __func__);
			return;
		}
		packet_id = (*buf++) << 24;
		packet_id += (*buf++) << 16;
		packet_id += (*buf++) << 8;
		packet_id += (*buf++);
		len -= 4;
		if (packet_id != hc->id) {
			printk(KERN_WARNING "%s: packet error - ID mismatch, "
			       "got 0x%x, we 0x%x\n",
			       __func__, packet_id, hc->id);
			return;
		}
	} else {
		if (hc->id) {
			printk(KERN_WARNING "%s: packet error - packet has no "
			       "ID, but we have\n", __func__);
			return;
		}
	}

multiframe:
	if (len < 1) {
		printk(KERN_WARNING "%s: packet error - packet too short, "
		       "channel expected at position %d.\n",
		       __func__, len-len_start + 1);
		return;
	}

	/* get channel and multiframe flag (top bit = more frames follow) */
	channel = *buf & 0x7f;
	m = *buf >> 7;
	buf++;
	len--;

	/* check length on multiframe: an explicit length byte follows */
	if (m) {
		if (len < 1) {
			printk(KERN_WARNING "%s: packet error - packet too "
			       "short, length expected at position %d.\n",
			       __func__, len_start - len - 1);
			return;
		}
		mlen = *buf++;
		len--;
		if (mlen == 0)
			mlen = 256;	/* wire value 0 encodes 256 bytes */
		if (len < mlen + 3) {
			printk(KERN_WARNING "%s: packet error - length %d at "
			       "position %d exceeds total length %d.\n",
			       __func__, mlen, len_start-len - 1, len_start);
			return;
		}
		if (len == mlen + 3) {
			printk(KERN_WARNING "%s: packet error - length %d at "
			       "position %d will not allow additional "
			       "packet.\n",
			       __func__, mlen, len_start-len + 1);
			return;
		}
	} else
		mlen = len - 2; /* single frame, subtract timebase */

	if (len < 2) {
		printk(KERN_WARNING "%s: packet error - packet too short, time "
		       "base expected at position %d.\n",
		       __func__, len-len_start + 1);
		return;
	}

	/* get time base */
	timebase = (*buf++) << 8;
	timebase |= (*buf++);
	len -= 2;

	/* if inactive, we send up a PH_ACTIVATE and activate */
	if (!test_bit(FLG_ACTIVE, &dch->Flags)) {
		if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
			printk(KERN_DEBUG "%s: interface become active due to "
			       "received packet\n", __func__);
		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
		_queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_ATOMIC);
	}

	/* distribute packet */
	l1oip_socket_recv(hc, remotecodec, channel, timebase, buf, mlen);
	buf += mlen;
	len -= mlen;

	/* multiframe: loop until the final (m == 0) entry has been seen */
	if (m)
		goto multiframe;

	/* restart timer */
	if ((int)(hc->timeout_tl.expires-jiffies) < 5 * HZ || !hc->timeout_on) {
		hc->timeout_on = 1;
		del_timer(&hc->timeout_tl);
		hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
		add_timer(&hc->timeout_tl);
	} else /* only adjust timer */
		hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;

	/* if ip or source port changes, follow the peer's new address */
	if ((hc->sin_remote.sin_addr.s_addr != sin->sin_addr.s_addr)
	    || (hc->sin_remote.sin_port != sin->sin_port)) {
		if (debug & DEBUG_L1OIP_SOCKET)
			printk(KERN_DEBUG "%s: remote address changes from "
			       "0x%08x to 0x%08x (port %d to %d)\n", __func__,
			       ntohl(hc->sin_remote.sin_addr.s_addr),
			       ntohl(sin->sin_addr.s_addr),
			       ntohs(hc->sin_remote.sin_port),
			       ntohs(sin->sin_port));
		hc->sin_remote.sin_addr.s_addr = sin->sin_addr.s_addr;
		hc->sin_remote.sin_port = sin->sin_port;
	}
}
/*
* socket stuff
*/
/*
 * Receive thread: owns the UDP socket of one l1oip interface.
 * Creates and binds the socket, publishes it through hc->socket for the
 * send path, then loops in kernel_recvmsg() until SIGTERM arrives
 * (sent by l1oip_socket_close()).  Completion of hc->socket_complete
 * signals that the thread has terminated.
 */
static int
l1oip_socket_thread(void *data)
{
	struct l1oip *hc = (struct l1oip *)data;
	int ret = 0;
	struct msghdr msg;
	struct sockaddr_in sin_rx;
	unsigned char *recvbuf;
	size_t recvbuf_size = 1500;	/* one ethernet-MTU sized buffer */
	int recvlen;
	struct socket *socket = NULL;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* allocate buffer memory */
	recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
	if (!recvbuf) {
		printk(KERN_ERR "%s: Failed to alloc recvbuf.\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	/* make daemon: SIGTERM is how l1oip_socket_close() stops us */
	allow_signal(SIGTERM);

	/* create socket */
	if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) {
		printk(KERN_ERR "%s: Failed to create socket.\n", __func__);
		ret = -EIO;
		goto fail;
	}

	/* set incoming address */
	hc->sin_local.sin_family = AF_INET;
	hc->sin_local.sin_addr.s_addr = INADDR_ANY;
	hc->sin_local.sin_port = htons((unsigned short)hc->localport);

	/* set outgoing address */
	hc->sin_remote.sin_family = AF_INET;
	hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
	hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);

	/* bind to incoming port */
	if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
			      sizeof(hc->sin_local))) {
		printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
		       __func__, hc->localport);
		ret = -EINVAL;
		goto fail;
	}

	/* check sk */
	if (socket->sk == NULL) {
		printk(KERN_ERR "%s: socket->sk == NULL\n", __func__);
		ret = -EIO;
		goto fail;
	}

	/* build receive message */
	msg.msg_name = &sin_rx;
	msg.msg_namelen = sizeof(sin_rx);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;

	/* build send message (reused by l1oip_socket_send()) */
	hc->sendmsg.msg_name = &hc->sin_remote;
	hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
	hc->sendmsg.msg_control = NULL;
	hc->sendmsg.msg_controllen = 0;

	/* give away socket: the send path borrows hc->socket under
	 * socket_lock, setting it NULL while the socket is in use */
	spin_lock(&hc->socket_lock);
	hc->socket = socket;
	spin_unlock(&hc->socket_lock);

	/* read loop: blocks in recvmsg until data or SIGTERM arrives */
	if (debug & DEBUG_L1OIP_SOCKET)
		printk(KERN_DEBUG "%s: socket created and open\n",
		       __func__);
	while (!signal_pending(current)) {
		struct kvec iov = {
			.iov_base = recvbuf,
			.iov_len = recvbuf_size,
		};
		recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
					 recvbuf_size, 0);
		if (recvlen > 0) {
			l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
		} else {
			if (debug & DEBUG_L1OIP_SOCKET)
				printk(KERN_WARNING
				       "%s: broken pipe on socket\n", __func__);
		}
	}

	/* get socket back, check first if in use, maybe by send function */
	spin_lock(&hc->socket_lock);
	/* if hc->socket is NULL, it is in use until it is given back */
	while (!hc->socket) {
		spin_unlock(&hc->socket_lock);
		schedule_timeout(HZ / 10);	/* poll until sender is done */
		spin_lock(&hc->socket_lock);
	}
	hc->socket = NULL;
	spin_unlock(&hc->socket_lock);

	if (debug & DEBUG_L1OIP_SOCKET)
		printk(KERN_DEBUG "%s: socket thread terminating\n",
		       __func__);

fail:
	/* free recvbuf */
	kfree(recvbuf);

	/* close socket */
	if (socket)
		sock_release(socket);

	/* if we got killed, signal completion */
	complete(&hc->socket_complete);
	hc->socket_thread = NULL; /* show termination of thread */

	if (debug & DEBUG_L1OIP_SOCKET)
		printk(KERN_DEBUG "%s: socket thread terminated\n",
		       __func__);
	return ret;
}
/*
 * Stop the receive thread (if any) and report layer 1 as down.
 */
static void
l1oip_socket_close(struct l1oip *hc)
{
	struct dchannel *dchan = hc->chan[hc->d_idx].dch;

	/* ask the receive thread to terminate and wait until it has */
	if (hc->socket_thread != NULL) {
		if (debug & DEBUG_L1OIP_SOCKET)
			printk(KERN_DEBUG "%s: socket thread exists, "
			       "killing...\n", __func__);
		send_sig(SIGTERM, hc->socket_thread, 0);
		wait_for_completion(&hc->socket_complete);
	}

	/* the transport is gone; signal deactivation upward if it was up */
	if (test_bit(FLG_ACTIVE, &dchan->Flags)) {
		if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
			printk(KERN_DEBUG "%s: interface become deactivated "
			       "due to timeout\n", __func__);
		test_and_clear_bit(FLG_ACTIVE, &dchan->Flags);
		_queue_data(&dchan->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_ATOMIC);
	}
}
/*
 * (Re)start the receive thread for this interface.  Any previous
 * thread is stopped first.  Returns 0 or a negative errno.
 */
static int
l1oip_socket_open(struct l1oip *hc)
{
	/* in case of reopen, we need to close first */
	l1oip_socket_close(hc);

	init_completion(&hc->socket_complete);

	/* create receive process */
	hc->socket_thread = kthread_run(l1oip_socket_thread, hc, "l1oip_%s",
		hc->name);
	if (IS_ERR(hc->socket_thread)) {
		int err = PTR_ERR(hc->socket_thread);
		printk(KERN_ERR "%s: Failed (%d) to create socket process.\n",
		       __func__, err);
		hc->socket_thread = NULL;
		/*
		 * hc->socket is owned by the receive thread: the thread
		 * creates the socket and releases it itself before exiting,
		 * and l1oip_socket_close() above reaped any previous thread,
		 * so hc->socket is normally NULL here.  sock_release(NULL)
		 * would oops, so only release a socket that really exists.
		 */
		if (hc->socket) {
			sock_release(hc->socket);
			hc->socket = NULL;
		}
		return err;
	}
	if (debug & DEBUG_L1OIP_SOCKET)
		printk(KERN_DEBUG "%s: socket thread created\n", __func__);

	return 0;
}
/*
 * Keepalive work handler: transmit an empty l1oip frame so the peer's
 * timeout does not expire while we have nothing to say.
 */
static void
l1oip_send_bh(struct work_struct *work)
{
	struct l1oip *card = container_of(work, struct l1oip, workq);

	if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
		printk(KERN_DEBUG "%s: keepalive timer expired, sending empty "
		       "frame on dchannel\n", __func__);

	/* an empty frame on the D-channel slot acts as the keepalive */
	l1oip_socket_send(card, 0, card->d_idx, 0, 0, NULL, 0);
}
/*
* timer stuff
*/
/*
 * Keepalive timer callback: runs in timer context, so the actual
 * socket send is deferred to the workqueue.
 */
static void
l1oip_keepalive(void *data)
{
	struct l1oip *card = data;

	schedule_work(&card->workq);
}
/*
 * Timeout timer callback: no frame was received for L1OIP_TIMEOUT
 * seconds, so layer 1 is considered down.
 */
static void
l1oip_timeout(void *data)
{
	struct l1oip *card = data;
	struct dchannel *dchan = card->chan[card->d_idx].dch;

	if (debug & DEBUG_L1OIP_MSG)
		printk(KERN_DEBUG "%s: timeout timer expired, turn layer one "
		       "down.\n", __func__);

	/* force the receive path to fully re-arm the timer next time */
	card->timeout_on = 0;

	/* report loss of layer 1 upward, if it was up */
	if (test_bit(FLG_ACTIVE, &dchan->Flags)) {
		if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
			printk(KERN_DEBUG "%s: interface become deactivated "
			       "due to timeout\n", __func__);
		test_and_clear_bit(FLG_ACTIVE, &dchan->Flags);
		_queue_data(&dchan->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_ATOMIC);
	}

	/* with ondemand, forget the peer until user space sets it again */
	if (card->ondemand) {
		if (debug & DEBUG_L1OIP_MSG)
			printk(KERN_DEBUG "%s: on demand causes ip address to "
			       "be removed\n", __func__);
		card->sin_remote.sin_addr.s_addr = 0;
	}
}
/*
* message handling
*/
/*
 * mISDN "send" callback of the D-channel.  PH_DATA_REQ payload is cut
 * into L1OIP_MAX_PERFRAME pieces and pushed through the UDP socket;
 * activate/deactivate requests are answered immediately according to
 * the current layer 1 state.
 * Returns 0 on success or -EINVAL for invalid/unhandled requests.
 */
static int
handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct l1oip *hc = dch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	int ret = -EINVAL;
	int l, ll;
	unsigned char *p;

	switch (hh->prim) {
	case PH_DATA_REQ:
		if (skb->len < 1) {
			printk(KERN_WARNING "%s: skb too small\n",
			       __func__);
			break;
		}
		if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
			printk(KERN_WARNING "%s: skb too large\n",
			       __func__);
			break;
		}
		/* send frame, chopped into L1OIP_MAX_PERFRAME pieces */
		p = skb->data;
		l = skb->len;
		while (l) {
			ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
			l1oip_socket_send(hc, 0, dch->slot, 0,
					  hc->chan[dch->slot].tx_counter++, p, ll);
			p += ll;
			l -= ll;
		}
		/* confirm the transmission, reusing the emptied skb */
		skb_trim(skb, 0);
		queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
		return 0;
	case PH_ACTIVATE_REQ:
		if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
			printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
			       , __func__, dch->slot, hc->b_num + 1);
		skb_trim(skb, 0);
		/* answer according to the real layer 1 state */
		if (test_bit(FLG_ACTIVE, &dch->Flags))
			queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
		else
			queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
		return 0;
	case PH_DEACTIVATE_REQ:
		if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
			printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
			       "(1..%d)\n", __func__, dch->slot,
			       hc->b_num + 1);
		skb_trim(skb, 0);
		/* deactivation is not forced; report the current state */
		if (test_bit(FLG_ACTIVE, &dch->Flags))
			queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
		else
			queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
		return 0;
	}
	/*
	 * NOTE(review): ret is still -EINVAL on every path that reaches
	 * here (nothing above assigns 0 without returning), so the free
	 * below never runs; presumably the mISDN core frees skb when
	 * send() fails — confirm against the caller.
	 */
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
/*
 * CONTROL_CHANNEL operations on the D-channel: get/set/unset the
 * remote peer address used by the UDP transport.
 */
static int
channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
{
	struct l1oip *hc = dch->hw;
	int ret = 0;

	if (cq->op == MISDN_CTRL_GETOP) {
		/* advertise the operations implemented below */
		cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER
			| MISDN_CTRL_GETPEER;
	} else if (cq->op == MISDN_CTRL_SETPEER) {
		hc->remoteip = (u32)cq->p1;
		hc->remoteport = cq->p2 & 0xffff;
		hc->localport = cq->p2 >> 16;
		if (!hc->remoteport)
			hc->remoteport = hc->localport;
		if (debug & DEBUG_L1OIP_SOCKET)
			printk(KERN_DEBUG "%s: got new ip address from user "
			       "space.\n", __func__);
		l1oip_socket_open(hc);
	} else if (cq->op == MISDN_CTRL_UNSETPEER) {
		if (debug & DEBUG_L1OIP_SOCKET)
			printk(KERN_DEBUG "%s: removing ip address.\n",
			       __func__);
		hc->remoteip = 0;
		l1oip_socket_open(hc);
	} else if (cq->op == MISDN_CTRL_GETPEER) {
		if (debug & DEBUG_L1OIP_SOCKET)
			printk(KERN_DEBUG "%s: getting ip address.\n",
			       __func__);
		cq->p1 = hc->remoteip;
		cq->p2 = hc->remoteport | (hc->localport << 16);
	} else {
		printk(KERN_WARNING "%s: unknown Op %x\n",
		       __func__, cq->op);
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Open the D-channel for a requesting layer 2, switching the protocol
 * if necessary and reporting the current activation state.
 */
static int
open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
{
	if (debug & DEBUG_HW_OPEN)
		printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
		       dch->dev.id, __builtin_return_address(0));

	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;

	/* adopt the requested protocol, warning if we switch away from
	 * a previously configured one */
	if (dch->dev.D.protocol != rq->protocol) {
		if (dch->dev.D.protocol != ISDN_P_NONE &&
		    (debug & DEBUG_HW_OPEN))
			printk(KERN_WARNING "%s: change protocol %x to %x\n",
			       __func__, dch->dev.D.protocol, rq->protocol);
		dch->dev.D.protocol = rq->protocol;
	}

	/* if layer 1 is already up, tell the opener right away */
	if (test_bit(FLG_ACTIVE, &dch->Flags))
		_queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
			    0, NULL, GFP_KERNEL);

	rq->ch = &dch->dev.D;
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s:cannot get module\n", __func__);
	return 0;
}
/*
 * Open a single B-channel for a requesting layer.
 */
static int
open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
{
	int cnr = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. */
	struct bchannel *bch;

	if (!test_channelmap(rq->adr.channel, dch->dev.channelmap) ||
	    rq->protocol == ISDN_P_NONE)
		return -EINVAL;

	bch = hc->chan[cnr].bch;
	if (!bch) {
		printk(KERN_ERR "%s:internal error ch %d has no bch\n",
		       __func__, cnr);
		return -EINVAL;
	}

	/* a B-channel may only be open once at a time */
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY;

	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s:cannot get module\n", __func__);
	return 0;
}
/*
 * mISDN device control for the D-channel: opens/closes channels and
 * dispatches CONTROL_CHANNEL requests to channel_dctrl().
 * Returns 0 on success or a negative error code.
 */
static int
l1oip_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct l1oip *hc = dch->hw;
	struct channel_req *rq;
	int err = 0;

	if (dch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: cmd:%x %p\n",
		       __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		switch (rq->protocol) {
		case ISDN_P_TE_S0:
		case ISDN_P_NT_S0:
			/* S0 protocols are only valid on a BRI interface */
			if (hc->pri) {
				err = -EINVAL;
				break;
			}
			err = open_dchannel(hc, dch, rq);
			break;
		case ISDN_P_TE_E1:
		case ISDN_P_NT_E1:
			/* E1 protocols are only valid on a PRI interface */
			if (!hc->pri) {
				err = -EINVAL;
				break;
			}
			err = open_dchannel(hc, dch, rq);
			break;
		default:
			/* everything else is treated as a B-channel open */
			err = open_bchannel(hc, dch, rq);
		}
		break;
	case CLOSE_CHANNEL:
		if (debug & DEBUG_HW_OPEN)
			printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
			       __func__, dch->dev.id,
			       __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_dctrl(dch, arg);
		break;
	default:
		if (dch->debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: unknown command %x\n",
			       __func__, cmd);
		err = -EINVAL;
	}
	return err;
}
/*
 * mISDN "send" callback of a B-channel.  PH_DATA_REQ payload is sent
 * via the UDP socket (the configured codec is applied inside
 * l1oip_socket_send()).  Frames consisting entirely of 0xff (AIS) or
 * 0x2a (silence) are suppressed on the wire; tx_counter still advances
 * for them so the sequence numbers stay consistent.
 * Returns 0 on success or -EINVAL for invalid/unhandled requests.
 */
static int
handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct l1oip *hc = bch->hw;
	int ret = -EINVAL;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	int l, ll;
	unsigned char *p;

	switch (hh->prim) {
	case PH_DATA_REQ:
		if (skb->len <= 0) {
			printk(KERN_WARNING "%s: skb too small\n",
			       __func__);
			break;
		}
		if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
			printk(KERN_WARNING "%s: skb too large\n",
			       __func__);
			break;
		}
		/* check for AIS / ulaw-silence (every byte 0xff) */
		l = skb->len;
		if (!memchr_inv(skb->data, 0xff, l)) {
			if (debug & DEBUG_L1OIP_MSG)
				printk(KERN_DEBUG "%s: got AIS, not sending, "
				       "but counting\n", __func__);
			hc->chan[bch->slot].tx_counter += l;
			skb_trim(skb, 0);
			queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
			return 0;
		}
		/* check for silence (every byte 0x2a) */
		l = skb->len;
		if (!memchr_inv(skb->data, 0x2a, l)) {
			if (debug & DEBUG_L1OIP_MSG)
				printk(KERN_DEBUG "%s: got silence, not sending"
				       ", but counting\n", __func__);
			hc->chan[bch->slot].tx_counter += l;
			skb_trim(skb, 0);
			queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
			return 0;
		}

		/* send frame, chopped into L1OIP_MAX_PERFRAME pieces */
		p = skb->data;
		l = skb->len;
		while (l) {
			ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
			l1oip_socket_send(hc, hc->codec, bch->slot, 0,
					  hc->chan[bch->slot].tx_counter, p, ll);
			hc->chan[bch->slot].tx_counter += ll;
			p += ll;
			l -= ll;
		}
		skb_trim(skb, 0);
		queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
		return 0;
	case PH_ACTIVATE_REQ:
		if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
			printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
			       , __func__, bch->slot, hc->b_num + 1);
		/* reset the stateful codec (used by l1oip_law_to_4bit) */
		hc->chan[bch->slot].codecstate = 0;
		test_and_set_bit(FLG_ACTIVE, &bch->Flags);
		skb_trim(skb, 0);
		queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
		return 0;
	case PH_DEACTIVATE_REQ:
		if (debug & (DEBUG_L1OIP_MSG | DEBUG_L1OIP_SOCKET))
			printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
			       "(1..%d)\n", __func__, bch->slot,
			       hc->b_num + 1);
		test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
		skb_trim(skb, 0);
		queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
		return 0;
	}
	/*
	 * NOTE(review): ret is still -EINVAL on every path reaching here,
	 * so the free below is dead code; presumably the mISDN core frees
	 * skb when send() fails — confirm against the caller.
	 */
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
/*
 * CONTROL_CHANNEL operations on a B-channel.  Only reports the
 * hardware feature flags (unclocked, unordered) to the requester.
 */
static int
channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
	int ret = 0;
	/* the caller passes a pointer to its dsp_features struct through
	 * the integer member cq->p1; the double cast reconstructs it */
	struct dsp_features *features =
		(struct dsp_features *)(*((u_long *)&cq->p1));

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		/* advertise which operations this driver implements */
		cq->op = MISDN_CTRL_HW_FEATURES_OP;
		break;
	case MISDN_CTRL_HW_FEATURES: /* fill features structure */
		if (debug & DEBUG_L1OIP_MSG)
			printk(KERN_DEBUG "%s: HW_FEATURE request\n",
			       __func__);
		/* create confirm */
		features->unclocked = 1;
		features->unordered = 1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown Op %x\n",
		       __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/*
 * mISDN channel control for a B-channel: close the channel or forward
 * CONTROL_CHANNEL requests to channel_bctrl().
 */
static int
l1oip_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	int err = -EINVAL;

	if (bch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: cmd:%x %p\n",
		       __func__, cmd, arg);

	if (cmd == CLOSE_CHANNEL) {
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		err = 0;
	} else if (cmd == CONTROL_CHANNEL) {
		err = channel_bctrl(bch, arg);
	} else {
		printk(KERN_WARNING "%s: unknown prim(%x)\n",
		       __func__, cmd);
	}
	return err;
}
/*
* cleanup module and stack
*/
/*
 * Tear down one virtual card: stop timers and workqueue, kill the
 * socket thread, unregister the mISDN device and free all channel
 * structures.  Used on module unload and on failed initialization.
 */
static void
release_card(struct l1oip *hc)
{
	int ch;

	/* stop periodic work before freeing anything it might touch */
	if (timer_pending(&hc->keep_tl))
		del_timer(&hc->keep_tl);

	if (timer_pending(&hc->timeout_tl))
		del_timer(&hc->timeout_tl);

	cancel_work_sync(&hc->workq);

	if (hc->socket_thread)
		l1oip_socket_close(hc);

	if (hc->registered && hc->chan[hc->d_idx].dch)
		mISDN_unregister_device(&hc->chan[hc->d_idx].dch->dev);
	for (ch = 0; ch < 128; ch++) {
		if (hc->chan[ch].dch) {
			mISDN_freedchannel(hc->chan[ch].dch);
			kfree(hc->chan[ch].dch);
		}
		if (hc->chan[ch].bch) {
			mISDN_freebchannel(hc->chan[ch].bch);
			kfree(hc->chan[ch].bch);
#ifdef REORDER_DEBUG
			if (hc->chan[ch].disorder_skb)
				dev_kfree_skb(hc->chan[ch].disorder_skb);
#endif
		}
	}

	spin_lock(&l1oip_lock);
	list_del(&hc->list);
	spin_unlock(&l1oip_lock);
	kfree(hc);
}
/*
 * Module exit: release every registered card and the codec tables.
 */
static void
l1oip_cleanup(void)
{
	struct l1oip *card, *tmp;

	list_for_each_entry_safe(card, tmp, &l1oip_ilist, list)
		release_card(card);

	l1oip_4bit_free();
}
/*
* module and stack init
*/
/*
 * Set up one virtual interface (called once per configured card).
 *
 * Allocates and registers the D-channel and all B-channels, starts the
 * socket thread and arms the keepalive timer.  On failure the caller
 * tears everything down again via l1oip_cleanup()/release_card().
 *
 * @hc:     zero-initialized card state
 * @pri:    nonzero for E1 (PRI), zero for S0 (BRI)
 * @bundle: nonzero to bundle all B-channels into one IP packet stream
 *
 * Returns 0 on success or a negative errno.
 */
static int
init_card(struct l1oip *hc, int pri, int bundle)
{
	struct dchannel *dch;
	struct bchannel *bch;
	int ret;
	int i, ch;

	spin_lock_init(&hc->socket_lock);
	hc->idx = l1oip_cnt;
	hc->pri = pri;
	hc->d_idx = pri ? 16 : 3;	/* chan[] slot of the D-channel */
	hc->b_num = pri ? 30 : 2;	/* default number of B-channels */
	hc->bundle = bundle;
	if (hc->pri)
		sprintf(hc->name, "l1oip-e1.%d", l1oip_cnt + 1);
	else
		sprintf(hc->name, "l1oip-s0.%d", l1oip_cnt + 1);

	switch (codec[l1oip_cnt]) {
	case 0: /* as is */
	case 1: /* alaw */
	case 2: /* ulaw */
	case 3: /* 4bit */
		break;
	default:
		printk(KERN_ERR "Codec(%d) not supported.\n",
		       codec[l1oip_cnt]);
		return -EINVAL;
	}
	hc->codec = codec[l1oip_cnt];
	if (debug & DEBUG_L1OIP_INIT)
		printk(KERN_DEBUG "%s: using codec %d\n",
		       __func__, hc->codec);

	if (id[l1oip_cnt] == 0) {
		printk(KERN_WARNING "Warning: No 'id' value given or "
		       "0, this is highly unsecure. Please use 32 "
		       "bit random number 0x...\n");
	}
	hc->id = id[l1oip_cnt];
	if (debug & DEBUG_L1OIP_INIT)
		printk(KERN_DEBUG "%s: using id 0x%x\n", __func__, hc->id);

	hc->ondemand = ondemand[l1oip_cnt];
	if (hc->ondemand && !hc->id) {
		printk(KERN_ERR "%s: ondemand option only allowed in "
		       "conjunction with non 0 ID\n", __func__);
		return -EINVAL;
	}

	if (limit[l1oip_cnt])
		hc->b_num = limit[l1oip_cnt];
	if (!pri && hc->b_num > 2) {
		printk(KERN_ERR "Maximum limit for BRI interface is 2 "
		       "channels.\n");
		return -EINVAL;
	}
	if (pri && hc->b_num > 126) {
		printk(KERN_ERR "Maximum limit for PRI interface is 126 "
		       "channels.\n");
		return -EINVAL;
	}
	if (pri && hc->b_num > 30) {
		/*
		 * Fix: this branch warns about exceeding the standard 30
		 * channels of an E1 line, so the message must say "PRI"
		 * (it used to say "BRI"), and it must print the configured
		 * channel count hc->b_num (the previously printed
		 * hc->limit is never assigned anywhere in this driver).
		 */
		printk(KERN_WARNING "Maximum limit for PRI interface is 30 "
		       "channels.\n");
		printk(KERN_WARNING "Your selection of %d channels must be "
		       "supported by application.\n", hc->b_num);
	}

	hc->remoteip = ip[l1oip_cnt << 2] << 24
		| ip[(l1oip_cnt << 2) + 1] << 16
		| ip[(l1oip_cnt << 2) + 2] << 8
		| ip[(l1oip_cnt << 2) + 3];
	hc->localport = port[l1oip_cnt]?:(L1OIP_DEFAULTPORT + l1oip_cnt);
	if (remoteport[l1oip_cnt])
		hc->remoteport = remoteport[l1oip_cnt];
	else
		hc->remoteport = hc->localport;
	if (debug & DEBUG_L1OIP_INIT)
		printk(KERN_DEBUG "%s: using local port %d remote ip "
		       "%d.%d.%d.%d port %d ondemand %d\n", __func__,
		       hc->localport, hc->remoteip >> 24,
		       (hc->remoteip >> 16) & 0xff,
		       (hc->remoteip >> 8) & 0xff, hc->remoteip & 0xff,
		       hc->remoteport, hc->ondemand);

	/* allocate and set up the D-channel */
	dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
	if (!dch)
		return -ENOMEM;
	dch->debug = debug;
	mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, NULL);
	dch->hw = hc;
	if (pri)
		dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
	else
		dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
	dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	dch->dev.D.send = handle_dmsg;
	dch->dev.D.ctrl = l1oip_dctrl;
	dch->dev.nrbchan = hc->b_num;
	dch->slot = hc->d_idx;
	hc->chan[hc->d_idx].dch = dch;

	/* allocate B-channels; on PRI, slot 16 belongs to the D-channel,
	 * so the slot numbering skips it (i is bumped at ch == 15) */
	i = 1;
	for (ch = 0; ch < dch->dev.nrbchan; ch++) {
		if (ch == 15)
			i++;
		bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
		if (!bch) {
			printk(KERN_ERR "%s: no memory for bchannel\n",
			       __func__);
			return -ENOMEM;
		}
		bch->nr = i + ch;
		bch->slot = i + ch;
		bch->debug = debug;
		mISDN_initbchannel(bch, MAX_DATA_MEM, 0);
		bch->hw = hc;
		bch->ch.send = handle_bmsg;
		bch->ch.ctrl = l1oip_bctrl;
		bch->ch.nr = i + ch;
		list_add(&bch->ch.list, &dch->dev.bchannels);
		hc->chan[i + ch].bch = bch;
		set_channelmap(bch->nr, dch->dev.channelmap);
	}
	/* TODO: create a parent device for this driver */
	ret = mISDN_register_device(&dch->dev, NULL, hc->name);
	if (ret)
		return ret;
	hc->registered = 1;

	if (debug & DEBUG_L1OIP_INIT)
		printk(KERN_DEBUG "%s: Setting up network card(%d)\n",
		       __func__, l1oip_cnt + 1);
	ret = l1oip_socket_open(hc);
	if (ret)
		return ret;

	/* arm the keepalive timer; first expiry after two seconds */
	hc->keep_tl.function = (void *)l1oip_keepalive;
	hc->keep_tl.data = (ulong)hc;
	init_timer(&hc->keep_tl);
	hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */
	add_timer(&hc->keep_tl);

	/* the timeout timer is armed when the first frame is received */
	hc->timeout_tl.function = (void *)l1oip_timeout;
	hc->timeout_tl.data = (ulong)hc;
	init_timer(&hc->timeout_tl);
	hc->timeout_on = 0;	/* state that we have timer off */

	return 0;
}
/*
 * Module init: create one virtual interface per configured "type"
 * module parameter.  Each interface gets its own l1oip state, mISDN
 * device and socket thread; any failure tears down everything created
 * so far via l1oip_cleanup().
 */
static int __init
l1oip_init(void)
{
	int pri, bundle;
	struct l1oip *hc;
	int ret;

	printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. %s\n",
	       l1oip_revision);

	INIT_LIST_HEAD(&l1oip_ilist);
	spin_lock_init(&l1oip_lock);

	/* build the law <-> 4bit conversion tables */
	if (l1oip_4bit_alloc(ulaw))
		return -ENOMEM;

	l1oip_cnt = 0;
	while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) {
		/* decode interface type: 1=BRI 2=PRI, 3/4 bundled variants */
		switch (type[l1oip_cnt] & 0xff) {
		case 1:
			pri = 0;
			bundle = 0;
			break;
		case 2:
			pri = 1;
			bundle = 0;
			break;
		case 3:
			pri = 0;
			bundle = 1;
			break;
		case 4:
			pri = 1;
			bundle = 1;
			break;
		default:
			printk(KERN_ERR "Card type(%d) not supported.\n",
			       type[l1oip_cnt] & 0xff);
			l1oip_cleanup();
			return -EINVAL;
		}

		if (debug & DEBUG_L1OIP_INIT)
			printk(KERN_DEBUG "%s: interface %d is %s with %s.\n",
			       __func__, l1oip_cnt, pri ? "PRI" : "BRI",
			       bundle ? "bundled IP packet for all B-channels" :
			       "separate IP packets for every B-channel");

		hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC);
		if (!hc) {
			printk(KERN_ERR "No kmem for L1-over-IP driver.\n");
			l1oip_cleanup();
			return -ENOMEM;
		}
		INIT_WORK(&hc->workq, (void *)l1oip_send_bh);

		spin_lock(&l1oip_lock);
		list_add_tail(&hc->list, &l1oip_ilist);
		spin_unlock(&l1oip_lock);

		ret = init_card(hc, pri, bundle);
		if (ret) {
			l1oip_cleanup();
			return ret;
		}

		l1oip_cnt++;
	}
	printk(KERN_INFO "%d virtual devices registered\n", l1oip_cnt);
	return 0;
}

module_init(l1oip_init);
module_exit(l1oip_cleanup);
| gpl-2.0 |
htc-mirror/shooteru-ics-crc-3.0.16-e733189 | drivers/media/video/bt8xx/bttv-gpio.c | 2931 | 4882 | /*
bttv-gpio.c -- gpio sub drivers
sysfs-based sub driver interface for bttv
mainly intended for gpio access
Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
& Marcus Metzler (mocm@thp.uni-koeln.de)
(c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <asm/io.h>
#include "bttvp.h"
/* ----------------------------------------------------------------------- */
/* internal: the bttv "bus" */
/* match a sub device to a sub driver by comparing the name prefix */
static int bttv_sub_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bttv_sub_driver *sub = to_bttv_sub_drv(drv);

	return strncmp(dev_name(dev), sub->wanted, strlen(sub->wanted)) == 0;
}
/* bus probe hook: forward to the sub driver's own probe, if it has one */
static int bttv_sub_probe(struct device *dev)
{
	struct bttv_sub_driver *drv = to_bttv_sub_drv(dev->driver);

	if (!drv->probe)
		return -ENODEV;
	return drv->probe(to_bttv_sub_dev(dev));
}
/* bus remove hook: the sub driver's remove callback is optional */
static int bttv_sub_remove(struct device *dev)
{
	struct bttv_sub_driver *drv = to_bttv_sub_drv(dev->driver);

	if (drv->remove)
		drv->remove(to_bttv_sub_dev(dev));
	return 0;
}
/* the virtual "bttv-sub" bus all gpio sub devices and drivers hang off */
struct bus_type bttv_sub_bus_type = {
	.name   = "bttv-sub",
	.match  = &bttv_sub_bus_match,
	.probe  = bttv_sub_probe,
	.remove = bttv_sub_remove,
};
/* device-core release callback: frees the containing bttv_sub_device */
static void release_sub_device(struct device *dev)
{
	kfree(to_bttv_sub_dev(dev));
}
/*
 * Register a new sub device named "<name><nr>" below the bttv PCI
 * device and track it on the card's sub-device list.
 * Returns 0 on success or a negative errno.
 */
int bttv_sub_add_device(struct bttv_core *core, char *name)
{
	struct bttv_sub_device *sub;
	int err;

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (NULL == sub)
		return -ENOMEM;

	sub->core        = core;
	sub->dev.parent  = &core->pci->dev;
	sub->dev.bus     = &bttv_sub_bus_type;
	sub->dev.release = release_sub_device;
	dev_set_name(&sub->dev, "%s%d", name, core->nr);

	err = device_register(&sub->dev);
	if (0 != err) {
		/*
		 * device_register() initialized the kobject, so the struct
		 * must never be freed directly after it fails; drop the
		 * reference with put_device(), which ends up calling
		 * release_sub_device() and frees everything properly.
		 */
		put_device(&sub->dev);
		return err;
	}
	printk("bttv%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
	list_add_tail(&sub->list, &core->subs);
	return 0;
}
/* unregister and drop every sub device registered for this card */
int bttv_sub_del_devices(struct bttv_core *core)
{
	struct bttv_sub_device *sub, *tmp;

	list_for_each_entry_safe(sub, tmp, &core->subs, list) {
		list_del(&sub->list);
		device_unregister(&sub->dev);
	}
	return 0;
}
/* ----------------------------------------------------------------------- */
/* external: sub-driver register/unregister */
/* register a sub driver and remember which device names it wants */
int bttv_sub_register(struct bttv_sub_driver *sub, char *wanted)
{
	sub->drv.bus = &bttv_sub_bus_type;
	snprintf(sub->wanted, sizeof(sub->wanted), "%s", wanted);
	return driver_register(&sub->drv);
}
EXPORT_SYMBOL(bttv_sub_register);
/* unregister a previously registered sub driver; always succeeds */
int bttv_sub_unregister(struct bttv_sub_driver *sub)
{
	driver_unregister(&sub->drv);
	return 0;
}
EXPORT_SYMBOL(bttv_sub_unregister);
/* ----------------------------------------------------------------------- */
/* external: gpio access functions */
/* set GPIO pin directions: within mask, outbits selects output enables */
void bttv_gpio_inout(struct bttv_core *core, u32 mask, u32 outbits)
{
	struct bttv *btv = container_of(core, struct bttv, c);
	unsigned long flags;
	u32 enable;

	spin_lock_irqsave(&btv->gpio_lock, flags);
	enable = btread(BT848_GPIO_OUT_EN);
	enable = (enable & ~mask) | (outbits & mask);
	btwrite(enable, BT848_GPIO_OUT_EN);
	spin_unlock_irqrestore(&btv->gpio_lock, flags);
}
/* read the current state of all GPIO data lines */
u32 bttv_gpio_read(struct bttv_core *core)
{
	struct bttv *btv = container_of(core, struct bttv, c);

	return btread(BT848_GPIO_DATA);
}
/* write all GPIO data lines at once (no locking, full-word store) */
void bttv_gpio_write(struct bttv_core *core, u32 value)
{
	struct bttv *btv = container_of(core, struct bttv, c);

	btwrite(value, BT848_GPIO_DATA);
}
/* read-modify-write the GPIO data lines selected by mask */
void bttv_gpio_bits(struct bttv_core *core, u32 mask, u32 bits)
{
	struct bttv *btv = container_of(core, struct bttv, c);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&btv->gpio_lock, flags);
	val = btread(BT848_GPIO_DATA);
	val = (val & ~mask) | (bits & mask);
	btwrite(val, BT848_GPIO_DATA);
	spin_unlock_irqrestore(&btv->gpio_lock, flags);
}
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
insanelycool/VICK | arch/sh/kernel/time.c | 3955 | 2674 | /*
* arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2002 - 2009 Paul Mundt
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/rtc.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <asm/rtc.h>
/* Dummy RTC ops, installed until a real RTC driver hooks itself in. */
static void null_rtc_get_time(struct timespec *ts)
{
	/* Report a fixed epoch: midnight, January 1st 2000. */
	ts->tv_nsec = 0;
	ts->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
}

static int null_rtc_set_time(const time_t secs)
{
	/* No backing hardware: silently accept and discard the time. */
	return 0;
}
/* Active RTC accessors; default to the null ops above.  NOTE(review):
 * presumably overridden by platform RTC setup code -- confirm callers. */
void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;

/* Hand the RTC's idea of wall time to the timekeeping core at boot. */
void read_persistent_clock(struct timespec *ts)
{
	rtc_sh_get_time(ts);
}

#ifdef CONFIG_GENERIC_CMOS_UPDATE
/* Write the current system time back to the RTC (called by the generic
 * timekeeping code when CONFIG_GENERIC_CMOS_UPDATE is enabled). */
int update_persistent_clock(struct timespec now)
{
	return rtc_sh_set_time(now.tv_sec);
}
#endif
/*
 * Fill @tm from the platform RTC.  When only the dummy accessor is
 * installed, @tm is left untouched.  Always reports RTC_24H mode.
 */
unsigned int get_rtc_time(struct rtc_time *tm)
{
	struct timespec tv;

	if (rtc_sh_get_time == null_rtc_get_time)
		return RTC_24H;

	rtc_sh_get_time(&tv);
	rtc_time_to_tm(tv.tv_sec, tm);
	return RTC_24H;
}
EXPORT_SYMBOL(get_rtc_time);
/* Convert @tm to seconds-since-epoch and push it to the platform RTC. */
int set_rtc_time(struct rtc_time *tm)
{
	unsigned long seconds;

	rtc_tm_to_time(tm, &seconds);
	return rtc_sh_set_time(seconds);
}
EXPORT_SYMBOL(set_rtc_time);
/*
 * Register the "rtc-generic" platform device, but only once a real RTC
 * backend has replaced the null accessor.
 */
static int __init rtc_generic_init(void)
{
	struct platform_device *pdev;

	if (rtc_sh_get_time == null_rtc_get_time)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
module_init(rtc_generic_init);
/* Optional board hook; run first from time_init() when the platform sets it. */
void (*board_time_init)(void);

/* Probe the early timer devices once initcalls are far enough along. */
static void __init sh_late_time_init(void)
{
	/*
	 * Make sure all compiled-in early timers register themselves.
	 *
	 * Run probe() for two "earlytimer" devices, these will be the
	 * clockevents and clocksource devices respectively. In the event
	 * that only a clockevents device is available, we -ENODEV on the
	 * clocksource and the jiffies clocksource is used transparently
	 * instead. No error handling is necessary here.
	 */
	early_platform_driver_register_all("earlytimer");
	early_platform_driver_probe("earlytimer", 2, 0);
}

/* Architecture time init: board hook, hwblk + clock framework, then defer
 * timer device probing via late_time_init. */
void __init time_init(void)
{
	if (board_time_init)
		board_time_init();

	hwblk_init();
	clk_init();

	late_time_init = sh_late_time_init;
}
| gpl-2.0 |
aapav01/android_kernel_samsung_ms013g-2 | drivers/mmc/host/tmio_mmc_dma.c | 4211 | 8229 | /*
* linux/drivers/mmc/tmio_mmc_dma.c
*
* Copyright (C) 2010-2011 Guennadi Liakhovetski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* DMA function for TMIO MMC implementations
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include "tmio_mmc.h"
#define TMIO_MMC_MIN_DMA_LEN 8
/*
 * Toggle the controller's DMA mode.  A no-op unless both the Tx and Rx
 * channels were successfully acquired.
 */
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->chan_tx && host->chan_rx) {
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
		/* Switch DMA mode on or off - SuperH specific? */
		sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
#endif
	}
}
/*
 * Abort any in-flight DMA: disable DMA mode, cancel all pending
 * descriptors on both channels, then re-enable DMA mode.
 */
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	struct dma_chan *rx = host->chan_rx;
	struct dma_chan *tx = host->chan_tx;

	tmio_mmc_enable_dma(host, false);

	if (rx)
		dmaengine_terminate_all(rx);
	if (tx)
		dmaengine_terminate_all(tx);

	tmio_mmc_enable_dma(host, true);
}
/*
 * Prepare and submit a slave-DMA read (card -> memory) for the current
 * request.  Falls back to PIO when the scatterlist violates the DMA
 * alignment constraints, the transfer is too short, or descriptor
 * preparation/submission fails; on hard failure both channels are
 * released so the host permanently reverts to PIO.
 */
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	/* Byte mask below the controller's required DMA alignment. */
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	/* Scan the scatterlist for misaligned offsets/lengths. */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	/* Unaligned transfers are only bounce-bufferable when they are a
	 * single element that fits in one page. */
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		/* Too short to be worth DMA; let the caller do PIO. */
		host->force_pio = true;
		return;
	}

	/* PIO receive-ready interrupts are useless while DMAing. */
	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	/* NOTE(review): when mapping or prep failed, "cookie" is printed
	 * here without having been assigned. */
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}
/*
 * Prepare and submit a slave-DMA write (memory -> card).  Mirrors
 * tmio_mmc_start_dma_rx(), with one extra step: an unaligned single-sg
 * buffer is copied into the bounce buffer before mapping.  Falls back to
 * PIO on any failure, releasing both channels on hard errors.
 */
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	/* Byte mask below the controller's required DMA alignment. */
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	/* Scan the scatterlist for misaligned offsets/lengths. */
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	/* Unaligned transfers are only bounce-bufferable when they are a
	 * single element that fits in one page. */
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		/* Too short to be worth DMA; let the caller do PIO. */
		host->force_pio = true;
		return;
	}

	/* PIO transmit-request interrupts are useless while DMAing. */
	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	/* NOTE(review): when mapping or prep failed, "cookie" is printed
	 * here without having been assigned. */
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
tmio_mmc_start_dma_rx(host);
} else {
if (host->chan_tx)
tmio_mmc_start_dma_tx(host);
}
}
/*
 * Tasklet: pick the DMA channel matching the current data transfer
 * direction (under host->lock), re-enable the data-end interrupt, then
 * tell the dmaengine to start executing the submitted descriptor.
 */
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	/*
	 * host was already dereferenced by spin_lock_irq() above, so the
	 * former "host &&" test here was dead code; only ->data can
	 * legitimately be NULL (request already completed).
	 */
	if (host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
/*
 * DMA-complete tasklet: unmap the scatterlist in the direction of the
 * finished transfer and run the normal data-IRQ completion path.  Bails
 * out when the request already completed (host->data cleared).
 */
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
/* It might be necessary to make filter MFD specific */
/* dma_request_channel() filter: accepts any offered channel and stashes
 * the platform-provided slave data in chan->private for the DMA driver. */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}
/*
 * Acquire the Tx and Rx DMA channels, a one-page bounce buffer and the
 * issue/complete tasklets, then switch the controller into DMA mode.
 * All-or-nothing: if any resource cannot be obtained, everything taken
 * so far is unwound and the host stays in PIO mode.
 */
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma)
		return;

	if (!host->chan_tx && !host->chan_rx) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		/* GFP_DMA: the bounce page must be reachable by the DMA engine. */
		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
/*
 * Release both DMA channels and the bounce page.  The host pointers are
 * cleared before the channels are handed back, so concurrent users see
 * "no DMA" rather than a stale channel.
 */
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	struct dma_chan *chan;

	chan = host->chan_tx;
	if (chan) {
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}

	chan = host->chan_rx;
	if (chan) {
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}
| gpl-2.0 |
littlelerroyy/android_kernel_htc_pyramid | drivers/tty/serial/mux.c | 4979 | 15468 | /*
** mux.c:
** serial driver for the Mux console found in some PA-RISC servers.
**
** (c) Copyright 2002 Ryan Bradetich
** (c) Copyright 2002 Hewlett-Packard Company
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This Driver currently only supports the console (port 0) on the MUX.
** Additional work will be needed on this driver to enable the full
** functionality of the MUX.
**
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/console.h>
#include <linux/delay.h> /* for udelay */
#include <linux/device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/parisc-device.h>
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
#define MUX_OFFSET 0x800
#define MUX_LINE_OFFSET 0x80
#define MUX_FIFO_SIZE 255
#define MUX_POLL_DELAY (30 * HZ / 1000)
#define IO_DATA_REG_OFFSET 0x3c
#define IO_DCOUNT_REG_OFFSET 0x40
#define MUX_EOFIFO(status) ((status & 0xF000) == 0xF000)
#define MUX_STATUS(status) ((status & 0xF000) == 0x8000)
#define MUX_BREAK(status) ((status & 0xF000) == 0x2000)
#define MUX_NR 256
/* Number of Mux ports registered so far, across all probed devices. */
static unsigned int port_cnt __read_mostly;

/* Per-port state: the core uart_port plus an "enabled" flag that tells
 * the poll loop (mux_poll) whether this port is open and needs service. */
struct mux_port {
	struct uart_port port;
	int enabled;
};

static struct mux_port mux_ports[MUX_NR];

static struct uart_driver mux_driver = {
	.owner = THIS_MODULE,
	.driver_name = "ttyB",
	.dev_name = "ttyB",
	.major = MUX_MAJOR,
	.minor = 0,
	.nr = MUX_NR,
};

/* All Mux I/O is driven by this polling timer (see mux_poll). */
static struct timer_list mux_timer;

/* Raw accessors for the per-port data and FIFO-count registers. */
#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
/**
 * get_mux_port_count - Get the number of available ports on the Mux.
 * @dev: The parisc device.
 *
 * This function is used to determine the number of ports the Mux
 * supports. The IODC data reports the number of ports the Mux
 * can support, but there are cases where not all the Mux ports
 * are connected. This function can override the IODC and
 * return the true port count.
 */
static int __init get_mux_port_count(struct parisc_device *dev)
{
	unsigned long bytecnt;
	u8 iodc_data[32];
	int status;

	/*
	 * The built-in Mux on the K-Class (Eole CAP/MUX) only has one of
	 * its ports wired up, so allocate resources for a single port.
	 */
	if (dev->id.hversion == 0x15)
		return 1;

	status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32);
	BUG_ON(status != PDC_OK);

	/* The high nibble of IODC byte 4 encodes (ports / 8) - 1. */
	return 8 * (((iodc_data[4] >> 4) & 0xf) + 1);
}
/**
 * mux_tx_empty - Check if the transmitter fifo is empty.
 * @port: Ptr to the uart_port.
 *
 * Returns TIOCSER_TEMT when the hardware FIFO for @port has drained
 * completely, 0 while characters are still queued.
 */
static unsigned int mux_tx_empty(struct uart_port *port)
{
	if (UART_GET_FIFO_CNT(port))
		return 0;

	return TIOCSER_TEMT;
}
/**
 * mux_set_mctrl - Set the current state of the modem control inputs.
 * @port: Ptr to the uart_port.
 * @mctrl: Modem control bits.
 *
 * The Serial MUX does not support CTS, DCD or DSR so this function
 * is ignored.
 */
static void mux_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

/**
 * mux_get_mctrl - Returns the current state of modem control inputs.
 * @port: Ptr to the uart_port.
 *
 * The Serial MUX does not support CTS, DCD or DSR so these lines are
 * treated as permanently active.
 */
static unsigned int mux_get_mctrl(struct uart_port *port)
{
	return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}

/**
 * mux_stop_tx - Stop transmitting characters.
 * @port: Ptr to the uart_port.
 *
 * The Serial MUX does not support this function.
 */
static void mux_stop_tx(struct uart_port *port)
{
}

/**
 * mux_start_tx - Start transmitting characters.
 * @port: Ptr to the uart_port.
 *
 * The Serial Mux does not support this function.
 */
static void mux_start_tx(struct uart_port *port)
{
}

/**
 * mux_stop_rx - Stop receiving characters.
 * @port: Ptr to the uart_port.
 *
 * The Serial Mux does not support this function.
 */
static void mux_stop_rx(struct uart_port *port)
{
}

/**
 * mux_enable_ms - Enable modem status interrupts.
 * @port: Ptr to the uart_port.
 *
 * The Serial Mux does not support this function.
 */
static void mux_enable_ms(struct uart_port *port)
{
}

/**
 * mux_break_ctl - Control the transmission of a break signal.
 * @port: Ptr to the uart_port.
 * @break_state: Raise/Lower the break signal.
 *
 * The Serial Mux does not support this function.
 */
static void mux_break_ctl(struct uart_port *port, int break_state)
{
}
/**
 * mux_write - Write chars to the mux fifo.
 * @port: Ptr to the uart_port.
 *
 * This function writes all the data from the uart buffer to
 * the mux fifo.
 */
static void mux_write(struct uart_port *port)
{
	int count;
	struct circ_buf *xmit = &port->state->xmit;

	/* A pending high-priority char (e.g. XON/XOFF) goes out first. */
	if(port->x_char) {
		UART_PUT_CHAR(port, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
		return;
	}

	if(uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		mux_stop_tx(port);
		return;
	}

	/* Only queue as many chars as the hardware FIFO has room for. */
	count = (port->fifosize) - UART_GET_FIFO_CNT(port);
	do {
		UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if(uart_circ_empty(xmit))
			break;

	} while(--count > 0);

	/* Busy-wait until the FIFO has drained before returning. */
	while(UART_GET_FIFO_CNT(port))
		udelay(1);

	if(uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		mux_stop_tx(port);
}
/**
 * mux_read - Read chars from the mux fifo.
 * @port: Ptr to the uart_port.
 *
 * This reads all available data from the mux's fifo and pushes
 * the data to the tty layer.
 */
static void mux_read(struct uart_port *port)
{
	int data;
	struct tty_struct *tty = port->state->port.tty;
	__u32 start_count = port->icount.rx;

	while(1) {
		data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);

		/* Words flagged as status carry no character; keep polling. */
		if (MUX_STATUS(data))
			continue;

		/* End-of-fifo marker: nothing more to read. */
		if (MUX_EOFIFO(data))
			break;

		port->icount.rx++;

		if (MUX_BREAK(data)) {
			port->icount.brk++;
			if(uart_handle_break(port))
				continue;
		}

		if (uart_handle_sysrq_char(port, data & 0xffu))
			continue;

		tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
	}

	/* Only bother the tty layer if something was actually received. */
	if (start_count != port->icount.rx) {
		tty_flip_buffer_push(tty);
	}
}
/**
 * mux_startup - Initialize the port.
 * @port: Ptr to the uart_port.
 *
 * Grab any resources needed for this port and start the
 * mux timer.
 */
static int mux_startup(struct uart_port *port)
{
	/* Tell the poll loop to start servicing this port. */
	mux_ports[port->line].enabled = 1;
	return 0;
}

/**
 * mux_shutdown - Disable the port.
 * @port: Ptr to the uart_port.
 *
 * Release any resources needed for the port.
 */
static void mux_shutdown(struct uart_port *port)
{
	mux_ports[port->line].enabled = 0;
}

/**
 * mux_set_termios - Change port parameters.
 * @port: Ptr to the uart_port.
 * @termios: new termios settings.
 * @old: old termios settings.
 *
 * The Serial Mux does not support this function.
 */
static void
mux_set_termios(struct uart_port *port, struct ktermios *termios,
	struct ktermios *old)
{
}

/**
 * mux_type - Describe the port.
 * @port: Ptr to the uart_port.
 *
 * Return a pointer to a string constant describing the
 * specified port.
 */
static const char *mux_type(struct uart_port *port)
{
	return "Mux";
}

/**
 * mux_release_port - Release memory and IO regions.
 * @port: Ptr to the uart_port.
 *
 * Release any memory and IO region resources currently in use by
 * the port.
 */
static void mux_release_port(struct uart_port *port)
{
}

/**
 * mux_request_port - Request memory and IO regions.
 * @port: Ptr to the uart_port.
 *
 * Request any memory and IO region resources required by the port.
 * If any fail, no resources should be registered when this function
 * returns, and it should return -EBUSY on failure.
 */
static int mux_request_port(struct uart_port *port)
{
	return 0;
}

/**
 * mux_config_port - Perform port autoconfiguration.
 * @port: Ptr to the uart_port.
 * @type: Bitmask of required configurations.
 *
 * Perform any autoconfiguration steps for the port. This function is
 * called if the UPF_BOOT_AUTOCONF flag is specified for the port.
 * [Note: This is required for now because of a bug in the Serial core.
 * rmk has already submitted a patch to linus, should be available for
 * 2.5.47.]
 */
static void mux_config_port(struct uart_port *port, int type)
{
	port->type = PORT_MUX;
}
/**
 * mux_verify_port - Verify the port information.
 * @port: Ptr to the uart_port.
 * @ser: Ptr to the serial information.
 *
 * Verify the new serial port information contained within serinfo is
 * suitable for this port type.  Only a mapped register base is required.
 */
static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	return port->membase ? 0 : -EINVAL;
}
/**
* mux_drv_poll - Mux poll function.
* @unused: Unused variable
*
* This function periodically polls the Serial MUX to check for new data.
*/
static void mux_poll(unsigned long unused)
{
int i;
for(i = 0; i < port_cnt; ++i) {
if(!mux_ports[i].enabled)
continue;
mux_read(&mux_ports[i].port);
mux_write(&mux_ports[i].port);
}
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
}
#ifdef CONFIG_SERIAL_MUX_CONSOLE
/* Polled console output on port 0: wait for the FIFO to empty, then emit
 * the buffer, expanding '\n' to "\r\n" on the way out. */
static void mux_console_write(struct console *co, const char *s, unsigned count)
{
	/* Wait until the FIFO drains. */
	while(UART_GET_FIFO_CNT(&mux_ports[0].port))
		udelay(1);

	while(count--) {
		if(*s == '\n') {
			UART_PUT_CHAR(&mux_ports[0].port, '\r');
		}
		UART_PUT_CHAR(&mux_ports[0].port, *s++);
	}

}

/* The Mux console has nothing to configure; always succeeds. */
static int mux_console_setup(struct console *co, char *options)
{
	return 0;
}

/* Map the console onto the uart driver's tty device. */
struct tty_driver *mux_console_device(struct console *co, int *index)
{
	*index = co->index;
	return mux_driver.tty_driver;
}

static struct console mux_console = {
	.name =		"ttyB",
	.write =	mux_console_write,
	.device =	mux_console_device,
	.setup =	mux_console_setup,
	.flags =	CON_ENABLED | CON_PRINTBUFFER,
	.index =	0,
};

#define MUX_CONSOLE	&mux_console
#else
#define MUX_CONSOLE	NULL
#endif
/* uart_ops table: only the hooks the Mux hardware can implement do real
 * work; the remainder are the empty stubs defined above. */
static struct uart_ops mux_pops = {
	.tx_empty =		mux_tx_empty,
	.set_mctrl =		mux_set_mctrl,
	.get_mctrl =		mux_get_mctrl,
	.stop_tx =		mux_stop_tx,
	.start_tx =		mux_start_tx,
	.stop_rx =		mux_stop_rx,
	.enable_ms =		mux_enable_ms,
	.break_ctl =		mux_break_ctl,
	.startup =		mux_startup,
	.shutdown =		mux_shutdown,
	.set_termios =		mux_set_termios,
	.type =			mux_type,
	.release_port =		mux_release_port,
	.request_port =		mux_request_port,
	.config_port =		mux_config_port,
	.verify_port =		mux_verify_port,
};
/**
 * mux_probe - Determine if the Serial Mux should claim this device.
 * @dev: The parisc device.
 *
 * Determine if the Serial Mux should claim this chip (return 0)
 * or not (return 1).
 */
static int __init mux_probe(struct parisc_device *dev)
{
	int i, status;

	int port_count = get_mux_port_count(dev);
	printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.6\n", port_count);

	dev_set_drvdata(&dev->dev, (void *)(long)port_count);

	/* NOTE(review): the return values of request_mem_region() and the
	 * per-port ioremap_nocache() below are not checked. */
	request_mem_region(dev->hpa.start + MUX_OFFSET,
                           port_count * MUX_LINE_OFFSET, "Mux");

	/* The uart driver is registered lazily, on the first probed device. */
	if(!port_cnt) {
		mux_driver.cons = MUX_CONSOLE;

		status = uart_register_driver(&mux_driver);
		if(status) {
			printk(KERN_ERR "Serial mux: Unable to register driver.\n");
			return 1;
		}
	}

	for(i = 0; i < port_count; ++i, ++port_cnt) {
		struct uart_port *port = &mux_ports[port_cnt].port;
		port->iobase	= 0;
		port->mapbase	= dev->hpa.start + MUX_OFFSET +
						(i * MUX_LINE_OFFSET);
		port->membase	= ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
		port->iotype	= UPIO_MEM;
		port->type	= PORT_MUX;
		port->irq	= 0;
		port->uartclk	= 0;
		port->fifosize	= MUX_FIFO_SIZE;
		port->ops	= &mux_pops;
		port->flags	= UPF_BOOT_AUTOCONF;
		port->line	= port_cnt;

		/* The port->timeout needs to match what is present in
		 * uart_wait_until_sent in serial_core.c. Otherwise
		 * the time spent in msleep_interruptable will be very
		 * long, causing the appearance of a console hang.
		 */
		port->timeout   = HZ / 50;
		spin_lock_init(&port->lock);
		status = uart_add_one_port(&mux_driver, port);
		BUG_ON(status);
	}

	return 0;
}
/* Tear down all ports belonging to one Mux device: locate its slots in
 * mux_ports[], remove each uart port, unmap it, and release the region. */
static int __devexit mux_remove(struct parisc_device *dev)
{
	int i, j;
	int port_count = (long)dev_get_drvdata(&dev->dev);

	/* Find Port 0 for this card in the mux_ports list. */
	for(i = 0; i < port_cnt; ++i) {
		if(mux_ports[i].port.mapbase == dev->hpa.start + MUX_OFFSET)
			break;
	}

	BUG_ON(i + port_count > port_cnt);

	/* Release the resources associated with each port on the device. */
	for(j = 0; j < port_count; ++j, ++i) {
		struct uart_port *port = &mux_ports[i].port;

		uart_remove_one_port(&mux_driver, port);
		if(port->membase)
			iounmap(port->membase);
	}

	release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET);
	return 0;
}

/* Hack. This idea was taken from the 8250_gsc.c on how to properly order
 * the serial port detection in the proper order. The idea is we always
 * want the builtin mux to be detected before addin mux cards, so we
 * specifically probe for the builtin mux cards first.
 *
 * This table only contains the parisc_device_id of known builtin mux
 * devices. All other mux cards will be detected by the generic mux_tbl.
 */
static struct parisc_device_id builtin_mux_tbl[] = {
	{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x15, 0x0000D }, /* All K-class */
	{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x44, 0x0000D }, /* E35, E45, and E55 */
	{ 0, }
};

/* Catch-all match table for every remaining (add-in) Mux device. */
static struct parisc_device_id mux_tbl[] = {
	{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D },
	{ 0, }
};

MODULE_DEVICE_TABLE(parisc, builtin_mux_tbl);
MODULE_DEVICE_TABLE(parisc, mux_tbl);

/* Registered first so the builtin Mux always claims its ports before
 * any add-in cards (see the ordering comment above). */
static struct parisc_driver builtin_serial_mux_driver = {
	.name =		"builtin_serial_mux",
	.id_table =	builtin_mux_tbl,
	.probe =	mux_probe,
	.remove =       __devexit_p(mux_remove),
};

static struct parisc_driver serial_mux_driver = {
	.name =		"serial_mux",
	.id_table =	mux_tbl,
	.probe =	mux_probe,
	.remove =       __devexit_p(mux_remove),
};
/**
 * mux_init - Serial MUX initialization procedure.
 *
 * Register the Serial MUX driver.  If any ports were probed, start the
 * polling timer and (optionally) the console.
 */
static int __init mux_init(void)
{
	register_parisc_driver(&builtin_serial_mux_driver);
	register_parisc_driver(&serial_mux_driver);

	if(port_cnt > 0) {
		/* Start the Mux timer */
		init_timer(&mux_timer);
		mux_timer.function = mux_poll;
		mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);

#ifdef CONFIG_SERIAL_MUX_CONSOLE
	        register_console(&mux_console);
#endif
	}

	return 0;
}

/**
 * mux_exit - Serial MUX cleanup procedure.
 *
 * Unregister the Serial MUX driver from the tty layer.
 */
static void __exit mux_exit(void)
{
	/* Delete the Mux timer. */
	if (port_cnt > 0) {
		del_timer(&mux_timer);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
		unregister_console(&mux_console);
#endif
	}

	/* NOTE(review): uart_unregister_driver() runs unconditionally, but
	 * mux_driver is only registered in mux_probe() when hardware was
	 * found -- confirm this is safe when no Mux device was probed. */
	unregister_parisc_driver(&builtin_serial_mux_driver);
	unregister_parisc_driver(&serial_mux_driver);
	uart_unregister_driver(&mux_driver);
}
module_init(mux_init);
module_exit(mux_exit);
MODULE_AUTHOR("Ryan Bradetich");
MODULE_DESCRIPTION("Serial MUX driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MUX_MAJOR);
| gpl-2.0 |
Eliminater74/g3_kernel | drivers/gpio/gpio-pl061.c | 4979 | 9484 | /*
* Copyright (C) 2008, 2009 Provigent Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)
*
* Data sheet: ARM DDI 0190B, September 2000
*/
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <asm/mach/irq.h>
#define GPIODIR 0x400
#define GPIOIS 0x404
#define GPIOIBE 0x408
#define GPIOIEV 0x40C
#define GPIOIE 0x410
#define GPIORIS 0x414
#define GPIOMIS 0x418
#define GPIOIC 0x41C
#define PL061_GPIO_NR 8
#ifdef CONFIG_PM
/* Snapshot of the PL061 registers saved across a suspend/resume cycle. */
struct pl061_context_save_regs {
	u8 gpio_data;
	u8 gpio_dir;
	u8 gpio_is;
	u8 gpio_ibe;
	u8 gpio_iev;
	u8 gpio_ie;
};
#endif

struct pl061_gpio {
	/* Each of the two spinlocks protects a different set of hardware
	 * registers and data structures. This decouples the code of the IRQ from
	 * the GPIO code. This also makes the case of a GPIO routine call from
	 * the IRQ code simpler.
	 */
	spinlock_t lock;		/* GPIO registers */

	void __iomem	*base;		/* ioremapped register window */
	int		irq_base;	/* first Linux irq; <= 0 disables irq support */
	struct irq_chip_generic *irq_gc;
	struct gpio_chip gc;

#ifdef CONFIG_PM
	struct pl061_context_save_regs csave_regs;
#endif
};
/*
 * Configure @offset as an input by clearing its bit in the direction
 * register.  The read-modify-write is serialized by chip->lock.
 */
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
	unsigned long flags;
	unsigned char dir;

	if (offset >= gc->ngpio)
		return -EINVAL;

	spin_lock_irqsave(&chip->lock, flags);
	dir = readb(chip->base + GPIODIR) & ~(1 << offset);
	writeb(dir, chip->base + GPIODIR);
	spin_unlock_irqrestore(&chip->lock, flags);

	return 0;
}
/*
 * Configure @offset as an output driving @value.  The value is written
 * both before and after flipping the direction bit -- see the inline
 * comment; the ordering here is deliberate and must be preserved.
 */
static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
		int value)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
	unsigned long flags;
	unsigned char gpiodir;

	if (offset >= gc->ngpio)
		return -EINVAL;

	spin_lock_irqsave(&chip->lock, flags);
	/* Data accesses use a per-line masked address: 1 << (offset + 2). */
	writeb(!!value << offset, chip->base + (1 << (offset + 2)));
	gpiodir = readb(chip->base + GPIODIR);
	gpiodir |= 1 << offset;
	writeb(gpiodir, chip->base + GPIODIR);

	/*
	 * gpio value is set again, because pl061 doesn't allow to set value of
	 * a gpio pin before configuring it in OUT mode.
	 */
	writeb(!!value << offset, chip->base + (1 << (offset + 2)));
	spin_unlock_irqrestore(&chip->lock, flags);

	return 0;
}
/* Read the level of one line via the PL061 masked data-access address. */
static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);

	return readb(chip->base + (1 << (offset + 2))) != 0;
}

/* Drive one line high or low via the same masked data-access scheme. */
static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);

	writeb(value ? (1 << offset) : 0, chip->base + (1 << (offset + 2)));
}
/* Translate a GPIO offset to its Linux irq number, or -EINVAL when this
 * chip was probed without irq support (irq_base <= 0). */
static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
{
	struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);

	return chip->irq_base > 0 ? chip->irq_base + offset : -EINVAL;
}
/*
 * Program the trigger type for one GPIO irq by updating three registers:
 * GPIOIS (level vs edge), GPIOIBE (both edges) and GPIOIEV (polarity).
 * All three are updated under the generic chip's raw lock.
 */
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct pl061_gpio *chip = gc->private;
	int offset = d->irq - chip->irq_base;
	unsigned long flags;
	u8 gpiois, gpioibe, gpioiev;

	if (offset < 0 || offset >= PL061_GPIO_NR)
		return -EINVAL;

	raw_spin_lock_irqsave(&gc->lock, flags);

	gpioiev = readb(chip->base + GPIOIEV);

	/* Level-sensitive? Set GPIOIS and pick polarity via GPIOIEV. */
	gpiois = readb(chip->base + GPIOIS);
	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		gpiois |= 1 << offset;
		if (trigger & IRQ_TYPE_LEVEL_HIGH)
			gpioiev |= 1 << offset;
		else
			gpioiev &= ~(1 << offset);
	} else
		gpiois &= ~(1 << offset);
	writeb(gpiois, chip->base + GPIOIS);

	/* Edge-sensitive: both edges via GPIOIBE, single edge via GPIOIEV. */
	gpioibe = readb(chip->base + GPIOIBE);
	if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		gpioibe |= 1 << offset;
	else {
		gpioibe &= ~(1 << offset);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			gpioiev |= 1 << offset;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			gpioiev &= ~(1 << offset);
	}
	writeb(gpioibe, chip->base + GPIOIBE);

	writeb(gpioiev, chip->base + GPIOIEV);

	raw_spin_unlock_irqrestore(&gc->lock, flags);

	return 0;
}
/*
 * Chained handler for the chip's upstream irq line: read the masked
 * interrupt status, acknowledge all pending lines at once, then fan out
 * to the per-GPIO virtual irqs.
 */
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
{
	unsigned long pending;
	int offset;
	struct pl061_gpio *chip = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);

	chained_irq_enter(irqchip, desc);

	pending = readb(chip->base + GPIOMIS);
	writeb(pending, chip->base + GPIOIC);	/* ack everything we saw */
	if (pending) {
		for_each_set_bit(offset, &pending, PL061_GPIO_NR)
			generic_handle_irq(pl061_to_irq(&chip->gc, offset));
	}

	chained_irq_exit(irqchip, desc);
}
/*
 * Set up a generic irq chip for the eight GPIO irqs: mask/unmask map to
 * the GPIOIE register, trigger configuration goes through pl061_irq_type.
 * NOTE(review): irq_alloc_generic_chip() can return NULL; the result is
 * dereferenced here without a check.
 */
static void __init pl061_init_gc(struct pl061_gpio *chip, int irq_base)
{
	struct irq_chip_type *ct;

	chip->irq_gc = irq_alloc_generic_chip("gpio-pl061", 1, irq_base,
					      chip->base, handle_simple_irq);
	chip->irq_gc->private = chip;

	ct = chip->irq_gc->chip_types;
	ct->chip.irq_mask = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->chip.irq_set_type = pl061_irq_type;
	ct->chip.irq_set_wake = irq_gc_set_wake;
	ct->regs.mask = GPIOIE;

	irq_setup_generic_chip(chip->irq_gc, IRQ_MSK(PL061_GPIO_NR),
			       IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
}
/*
 * AMBA probe: allocate per-chip state, map the register window, register
 * the gpio_chip and -- when an irq base was supplied via platform data --
 * wire up the chained irq handler and apply initial pin directions.
 */
static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct pl061_platform_data *pdata;
	struct pl061_gpio *chip;
	int ret, irq, i;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	/* gpio base/irq base come from platform data, or defaults for DT. */
	pdata = dev->dev.platform_data;
	if (pdata) {
		chip->gc.base = pdata->gpio_base;
		chip->irq_base = pdata->irq_base;
	} else if (dev->dev.of_node) {
		chip->gc.base = -1;
		chip->irq_base = 0;
	} else {
		ret = -ENODEV;
		goto free_mem;
	}

	if (!request_mem_region(dev->res.start,
				resource_size(&dev->res), "pl061")) {
		ret = -EBUSY;
		goto free_mem;
	}

	chip->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (chip->base == NULL) {
		ret = -ENOMEM;
		goto release_region;
	}

	spin_lock_init(&chip->lock);

	chip->gc.direction_input = pl061_direction_input;
	chip->gc.direction_output = pl061_direction_output;
	chip->gc.get = pl061_get_value;
	chip->gc.set = pl061_set_value;
	chip->gc.to_irq = pl061_to_irq;
	chip->gc.ngpio = PL061_GPIO_NR;
	chip->gc.label = dev_name(&dev->dev);
	chip->gc.dev = &dev->dev;
	chip->gc.owner = THIS_MODULE;

	ret = gpiochip_add(&chip->gc);
	if (ret)
		goto iounmap;

	/*
	 * irq_chip support
	 */
	if (chip->irq_base <= 0)
		return 0;

	pl061_init_gc(chip, chip->irq_base);

	writeb(0, chip->base + GPIOIE); /* disable irqs */
	/* NOTE(review): failure below jumps to "iounmap" without undoing
	 * gpiochip_add(), leaving a registered chip behind -- confirm. */
	irq = dev->irq[0];
	if (irq < 0) {
		ret = -ENODEV;
		goto iounmap;
	}
	irq_set_chained_handler(irq, pl061_irq_handler);
	irq_set_handler_data(irq, chip);

	/* Apply the board-requested initial direction/level of each pin. */
	for (i = 0; i < PL061_GPIO_NR; i++) {
		if (pdata) {
			if (pdata->directions & (1 << i))
				pl061_direction_output(&chip->gc, i,
						pdata->values & (1 << i));
			else
				pl061_direction_input(&chip->gc, i);
		}
	}

	amba_set_drvdata(dev, chip);

	return 0;

iounmap:
	iounmap(chip->base);
release_region:
	release_mem_region(dev->res.start, resource_size(&dev->res));
free_mem:
	kfree(chip);

	return ret;
}
#ifdef CONFIG_PM
/*
 * pl061_suspend - snapshot controller state before a system sleep.
 *
 * Saves direction, interrupt-type and interrupt-enable registers, plus
 * the current level of every pin configured as an output, so that
 * pl061_resume() can reprogram the block after power loss.
 */
static int pl061_suspend(struct device *dev)
{
	struct pl061_gpio *chip = dev_get_drvdata(dev);
	int offset;

	chip->csave_regs.gpio_data = 0;
	chip->csave_regs.gpio_dir = readb(chip->base + GPIODIR);
	chip->csave_regs.gpio_is = readb(chip->base + GPIOIS);
	chip->csave_regs.gpio_ibe = readb(chip->base + GPIOIBE);
	chip->csave_regs.gpio_iev = readb(chip->base + GPIOIEV);
	chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE);

	/* Record output levels only for pins whose direction bit is set. */
	for (offset = 0; offset < PL061_GPIO_NR; offset++) {
		if (chip->csave_regs.gpio_dir & (1 << offset))
			chip->csave_regs.gpio_data |=
				pl061_get_value(&chip->gc, offset) << offset;
	}

	return 0;
}
/*
 * pl061_resume - restore the state captured by pl061_suspend().
 *
 * Directions/levels are restored first, then the interrupt
 * configuration registers, with the enable register written last so
 * no interrupt fires before its type/edge setup is back in place.
 */
static int pl061_resume(struct device *dev)
{
	struct pl061_gpio *chip = dev_get_drvdata(dev);
	int offset;

	for (offset = 0; offset < PL061_GPIO_NR; offset++) {
		if (chip->csave_regs.gpio_dir & (1 << offset))
			pl061_direction_output(&chip->gc, offset,
					chip->csave_regs.gpio_data &
					(1 << offset));
		else
			pl061_direction_input(&chip->gc, offset);
	}

	writeb(chip->csave_regs.gpio_is, chip->base + GPIOIS);
	writeb(chip->csave_regs.gpio_ibe, chip->base + GPIOIBE);
	writeb(chip->csave_regs.gpio_iev, chip->base + GPIOIEV);
	writeb(chip->csave_regs.gpio_ie, chip->base + GPIOIE);

	return 0;
}
static const struct dev_pm_ops pl061_dev_pm_ops = {
.suspend = pl061_suspend,
.resume = pl061_resume,
.freeze = pl061_suspend,
.restore = pl061_resume,
};
#endif
static struct amba_id pl061_ids[] = {
{
.id = 0x00041061,
.mask = 0x000fffff,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, pl061_ids);
static struct amba_driver pl061_gpio_driver = {
.drv = {
.name = "pl061_gpio",
#ifdef CONFIG_PM
.pm = &pl061_dev_pm_ops,
#endif
},
.id_table = pl061_ids,
.probe = pl061_probe,
};
static int __init pl061_gpio_init(void)
{
return amba_driver_register(&pl061_gpio_driver);
}
subsys_initcall(pl061_gpio_init);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("PL061 GPIO driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ronasimi/android.googlesource.com-kernel-msm | drivers/tty/serial/mux.c | 4979 | 15468 | /*
** mux.c:
** serial driver for the Mux console found in some PA-RISC servers.
**
** (c) Copyright 2002 Ryan Bradetich
** (c) Copyright 2002 Hewlett-Packard Company
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This Driver currently only supports the console (port 0) on the MUX.
** Additional work will be needed on this driver to enable the full
** functionality of the MUX.
**
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/console.h>
#include <linux/delay.h> /* for udelay */
#include <linux/device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/parisc-device.h>
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
#define MUX_OFFSET 0x800
#define MUX_LINE_OFFSET 0x80
#define MUX_FIFO_SIZE 255
#define MUX_POLL_DELAY (30 * HZ / 1000)
#define IO_DATA_REG_OFFSET 0x3c
#define IO_DCOUNT_REG_OFFSET 0x40
#define MUX_EOFIFO(status) ((status & 0xF000) == 0xF000)
#define MUX_STATUS(status) ((status & 0xF000) == 0x8000)
#define MUX_BREAK(status) ((status & 0xF000) == 0x2000)
#define MUX_NR 256
static unsigned int port_cnt __read_mostly;
struct mux_port {
struct uart_port port;
int enabled;
};
static struct mux_port mux_ports[MUX_NR];
static struct uart_driver mux_driver = {
.owner = THIS_MODULE,
.driver_name = "ttyB",
.dev_name = "ttyB",
.major = MUX_MAJOR,
.minor = 0,
.nr = MUX_NR,
};
static struct timer_list mux_timer;
#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
/**
 * get_mux_port_count - Get the number of available ports on the Mux.
 * @dev: The parisc device.
 *
 * This function is used to determine the number of ports the Mux
 * supports.  The IODC data reports the number of ports the Mux
 * can support, but there are cases where not all the Mux ports
 * are connected.  This function can override the IODC and
 * return the true port count.
 */
static int __init get_mux_port_count(struct parisc_device *dev)
{
	int status;
	u8 iodc_data[32];
	unsigned long bytecnt;

	/* If this is the built-in Mux for the K-Class (Eole CAP/MUX),
	 * we only need to allocate resources for 1 port since the
	 * other 7 ports are not connected.
	 */
	if(dev->id.hversion == 0x15)
		return 1;

	/* Firmware call; a failure here means the box is unusable anyway. */
	status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32);
	BUG_ON(status != PDC_OK);

	/* Return the number of ports specified in the iodc data. */
	return ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8;
}
/**
 * mux_tx_empty - Check if the transmitter fifo is empty.
 * @port: Ptr to the uart_port.
 *
 * Returns TIOCSER_TEMT when the transmit fifo for @port holds no
 * characters, otherwise 0.
 */
static unsigned int mux_tx_empty(struct uart_port *port)
{
	if (UART_GET_FIFO_CNT(port))
		return 0;

	return TIOCSER_TEMT;
}
/**
 * mux_set_mctrl - Set the current state of the modem control inputs.
 * @port: Ptr to the uart_port.
 * @mctrl: Modem control bits.
 *
 * The Serial MUX does not support CTS, DCD or DSR so this function
 * is ignored.
 */
static void mux_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
/**
 * mux_get_mctrl - Returns the current state of modem control inputs.
 * @port: Ptr to the uart_port.
 *
 * The Serial MUX does not support CTS, DCD or DSR so these lines are
 * reported as permanently asserted.
 */
static unsigned int mux_get_mctrl(struct uart_port *port)
{
	unsigned int mctrl = TIOCM_CAR;

	mctrl |= TIOCM_DSR;
	mctrl |= TIOCM_CTS;

	return mctrl;
}
/**
 * mux_stop_tx - Stop transmitting characters.
 * @port: Ptr to the uart_port.
 *
 * The Serial MUX does not support this function; transmission is
 * driven entirely from the polling timer (see mux_poll/mux_write).
 */
static void mux_stop_tx(struct uart_port *port)
{
}
/**
 * mux_start_tx - Start transmitting characters.
 * @port: Ptr to the uart_port.
 *
 * The Serial Mux does not support this function; the polling timer
 * picks up pending data on its next tick.
 */
static void mux_start_tx(struct uart_port *port)
{
}
/**
 * mux_stop_rx - Stop receiving characters.
 * @port: Ptr to the uart_port.
 *
 * The Serial Mux does not support this function.
 */
static void mux_stop_rx(struct uart_port *port)
{
}
/**
 * mux_enable_ms - Enable modem status interrupts.
 * @port: Ptr to the uart_port.
 *
 * The Serial Mux does not support this function.
 */
static void mux_enable_ms(struct uart_port *port)
{
}
/**
 * mux_break_ctl - Control the transmission of a break signal.
 * @port: Ptr to the uart_port.
 * @break_state: Raise/Lower the break signal.
 *
 * The Serial Mux does not support this function.
 */
static void mux_break_ctl(struct uart_port *port, int break_state)
{
}
/**
 * mux_write - Write chars to the mux fifo.
 * @port: Ptr to the uart_port.
 *
 * Drains the uart circular transmit buffer into the Mux fifo.
 * Called from the polling timer (mux_poll), not from interrupt
 * context.
 */
static void mux_write(struct uart_port *port)
{
	int count;
	struct circ_buf *xmit = &port->state->xmit;

	/* A pending high-priority character (e.g. flow control) goes first. */
	if(port->x_char) {
		UART_PUT_CHAR(port, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
		return;
	}

	if(uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		mux_stop_tx(port);
		return;
	}

	/* Only write as many characters as the fifo has room for. */
	count = (port->fifosize) - UART_GET_FIFO_CNT(port);
	do {
		UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if(uart_circ_empty(xmit))
			break;
	} while(--count > 0);

	/* Busy-wait until the fifo has fully drained before returning. */
	while(UART_GET_FIFO_CNT(port))
		udelay(1);

	if(uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		mux_stop_tx(port);
}
/**
 * mux_read - Read chars from the mux fifo.
 * @port: Ptr to the uart_port.
 *
 * This reads all available data from the mux's fifo and pushes
 * the data to the tty layer.  Status words, end-of-fifo markers and
 * break conditions are encoded in the top nibble of each word read
 * (see the MUX_STATUS/MUX_EOFIFO/MUX_BREAK macros).
 */
static void mux_read(struct uart_port *port)
{
	int data;
	struct tty_struct *tty = port->state->port.tty;
	__u32 start_count = port->icount.rx;

	while(1) {
		data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);

		/* Status-only word carries no character; keep draining. */
		if (MUX_STATUS(data))
			continue;

		/* End of fifo: nothing more to read. */
		if (MUX_EOFIFO(data))
			break;

		port->icount.rx++;

		if (MUX_BREAK(data)) {
			port->icount.brk++;
			if(uart_handle_break(port))
				continue;
		}

		if (uart_handle_sysrq_char(port, data & 0xffu))
			continue;

		tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
	}

	/* Only push to the tty layer if something actually arrived. */
	if (start_count != port->icount.rx) {
		tty_flip_buffer_push(tty);
	}
}
/**
 * mux_startup - Initialize the port.
 * @port: Ptr to the uart_port.
 *
 * Marks the port enabled so the polling timer (mux_poll) starts
 * servicing it.  No hardware resources need to be grabbed.
 */
static int mux_startup(struct uart_port *port)
{
	mux_ports[port->line].enabled = 1;
	return 0;
}
/**
 * mux_shutdown - Disable the port.
 * @port: Ptr to the uart_port.
 *
 * Marks the port disabled so the polling timer skips it.
 */
static void mux_shutdown(struct uart_port *port)
{
	mux_ports[port->line].enabled = 0;
}
/**
 * mux_set_termios - Change port parameters.
 * @port: Ptr to the uart_port.
 * @termios: new termios settings.
 * @old: old termios settings.
 *
 * The Serial Mux does not support this function.
 */
static void
mux_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
}
/**
 * mux_type - Describe the port.
 * @port: Ptr to the uart_port.
 *
 * Return a pointer to a string constant describing the
 * specified port.
 */
static const char *mux_type(struct uart_port *port)
{
	return "Mux";
}
/**
 * mux_release_port - Release memory and IO regions.
 * @port: Ptr to the uart_port.
 *
 * Nothing to do: the region for all lines is held by mux_probe()
 * and released in mux_remove().
 */
static void mux_release_port(struct uart_port *port)
{
}
/**
 * mux_request_port - Request memory and IO regions.
 * @port: Ptr to the uart_port.
 *
 * Nothing to request per-port; mux_probe() already reserved the
 * whole device region, so always succeed.
 */
static int mux_request_port(struct uart_port *port)
{
	return 0;
}
/**
 * mux_config_port - Perform port autoconfiguration.
 * @port: Ptr to the uart_port.
 * @type: Bitmask of required configurations.
 *
 * Perform any autoconfiguration steps for the port.  This function is
 * called if the UPF_BOOT_AUTOCONF flag is specified for the port.
 * [Note: This is required for now because of a bug in the Serial core.
 * rmk has already submitted a patch to linus, should be available for
 * 2.5.47.]
 */
static void mux_config_port(struct uart_port *port, int type)
{
	port->type = PORT_MUX;
}
/**
 * mux_verify_port - Verify the port information.
 * @port: Ptr to the uart_port.
 * @ser: Ptr to the serial information.
 *
 * A port is acceptable iff it has been mapped; the requested serial
 * settings themselves are not inspected.
 */
static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	return port->membase ? 0 : -EINVAL;
}
/**
* mux_drv_poll - Mux poll function.
* @unused: Unused variable
*
* This function periodically polls the Serial MUX to check for new data.
*/
static void mux_poll(unsigned long unused)
{
int i;
for(i = 0; i < port_cnt; ++i) {
if(!mux_ports[i].enabled)
continue;
mux_read(&mux_ports[i].port);
mux_write(&mux_ports[i].port);
}
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
}
#ifdef CONFIG_SERIAL_MUX_CONSOLE
/*
 * mux_console_write - polled console output on port 0.
 *
 * Waits for the transmit fifo to drain, then writes the message a
 * byte at a time, expanding '\n' to "\r\n".
 */
static void mux_console_write(struct console *co, const char *s, unsigned count)
{
	/* Wait until the FIFO drains. */
	while(UART_GET_FIFO_CNT(&mux_ports[0].port))
		udelay(1);

	while(count--) {
		if(*s == '\n') {
			UART_PUT_CHAR(&mux_ports[0].port, '\r');
		}
		UART_PUT_CHAR(&mux_ports[0].port, *s++);
	}
}
/* The Mux console needs no setup (no baud/parity options to apply). */
static int mux_console_setup(struct console *co, char *options)
{
	return 0;
}
/*
 * Map the console to its tty driver.
 * NOTE(review): not declared static although it is only used via
 * mux_console below — presumably an oversight; confirm no external user.
 */
struct tty_driver *mux_console_device(struct console *co, int *index)
{
	*index = co->index;
	return mux_driver.tty_driver;
}
static struct console mux_console = {
.name = "ttyB",
.write = mux_console_write,
.device = mux_console_device,
.setup = mux_console_setup,
.flags = CON_ENABLED | CON_PRINTBUFFER,
.index = 0,
};
#define MUX_CONSOLE &mux_console
#else
#define MUX_CONSOLE NULL
#endif
static struct uart_ops mux_pops = {
.tx_empty = mux_tx_empty,
.set_mctrl = mux_set_mctrl,
.get_mctrl = mux_get_mctrl,
.stop_tx = mux_stop_tx,
.start_tx = mux_start_tx,
.stop_rx = mux_stop_rx,
.enable_ms = mux_enable_ms,
.break_ctl = mux_break_ctl,
.startup = mux_startup,
.shutdown = mux_shutdown,
.set_termios = mux_set_termios,
.type = mux_type,
.release_port = mux_release_port,
.request_port = mux_request_port,
.config_port = mux_config_port,
.verify_port = mux_verify_port,
};
/**
 * mux_probe - Determine if the Serial Mux should claim this device.
 * @dev: The parisc device.
 *
 * Determine if the Serial Mux should claim this chip (return 0)
 * or not (return 1).  Registers the uart driver on the first probe
 * and adds one uart_port per detected Mux line; port_cnt accumulates
 * across all probed devices.
 */
static int __init mux_probe(struct parisc_device *dev)
{
	int i, status;

	int port_count = get_mux_port_count(dev);
	printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.6\n", port_count);

	/* Stash the per-device port count for mux_remove(). */
	dev_set_drvdata(&dev->dev, (void *)(long)port_count);
	/* NOTE(review): request_mem_region() result is not checked here. */
	request_mem_region(dev->hpa.start + MUX_OFFSET,
			port_count * MUX_LINE_OFFSET, "Mux");

	/* First device seen: register the uart driver itself. */
	if(!port_cnt) {
		mux_driver.cons = MUX_CONSOLE;

		status = uart_register_driver(&mux_driver);
		if(status) {
			printk(KERN_ERR "Serial mux: Unable to register driver.\n");
			return 1;
		}
	}

	for(i = 0; i < port_count; ++i, ++port_cnt) {
		struct uart_port *port = &mux_ports[port_cnt].port;
		port->iobase = 0;
		port->mapbase = dev->hpa.start + MUX_OFFSET +
						(i * MUX_LINE_OFFSET);
		port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
		port->iotype = UPIO_MEM;
		port->type = PORT_MUX;
		port->irq = 0;
		port->uartclk = 0;
		port->fifosize = MUX_FIFO_SIZE;
		port->ops = &mux_pops;
		port->flags = UPF_BOOT_AUTOCONF;
		port->line = port_cnt;

		/* The port->timeout needs to match what is present in
		 * uart_wait_until_sent in serial_core.c.  Otherwise
		 * the time spent in msleep_interruptable will be very
		 * long, causing the appearance of a console hang.
		 */
		port->timeout = HZ / 50;
		spin_lock_init(&port->lock);

		status = uart_add_one_port(&mux_driver, port);
		BUG_ON(status);
	}

	return 0;
}
/*
 * mux_remove - tear down all uart ports belonging to one Mux device.
 *
 * Uses the mapbase of the device's first line to locate its slice of
 * the global mux_ports[] array, then unregisters and unmaps each port.
 */
static int __devexit mux_remove(struct parisc_device *dev)
{
	int i, j;
	int port_count = (long)dev_get_drvdata(&dev->dev);

	/* Find Port 0 for this card in the mux_ports list. */
	for(i = 0; i < port_cnt; ++i) {
		if(mux_ports[i].port.mapbase == dev->hpa.start + MUX_OFFSET)
			break;
	}

	BUG_ON(i + port_count > port_cnt);

	/* Release the resources associated with each port on the device. */
	for(j = 0; j < port_count; ++j, ++i) {
		struct uart_port *port = &mux_ports[i].port;

		uart_remove_one_port(&mux_driver, port);
		if(port->membase)
			iounmap(port->membase);
	}

	release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET);
	return 0;
}
/* Hack. This idea was taken from the 8250_gsc.c on how to properly order
* the serial port detection in the proper order. The idea is we always
* want the builtin mux to be detected before addin mux cards, so we
* specifically probe for the builtin mux cards first.
*
* This table only contains the parisc_device_id of known builtin mux
* devices. All other mux cards will be detected by the generic mux_tbl.
*/
static struct parisc_device_id builtin_mux_tbl[] = {
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x15, 0x0000D }, /* All K-class */
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x44, 0x0000D }, /* E35, E45, and E55 */
{ 0, }
};
static struct parisc_device_id mux_tbl[] = {
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D },
{ 0, }
};
MODULE_DEVICE_TABLE(parisc, builtin_mux_tbl);
MODULE_DEVICE_TABLE(parisc, mux_tbl);
static struct parisc_driver builtin_serial_mux_driver = {
.name = "builtin_serial_mux",
.id_table = builtin_mux_tbl,
.probe = mux_probe,
.remove = __devexit_p(mux_remove),
};
static struct parisc_driver serial_mux_driver = {
.name = "serial_mux",
.id_table = mux_tbl,
.probe = mux_probe,
.remove = __devexit_p(mux_remove),
};
/**
 * mux_init - Serial MUX initialization procedure.
 *
 * Register the Serial MUX driver.  The builtin driver is registered
 * first so builtin ports get the lowest line numbers (see the comment
 * above builtin_mux_tbl).  The polling timer and the console are only
 * started if probing actually found ports (port_cnt > 0).
 */
static int __init mux_init(void)
{
	register_parisc_driver(&builtin_serial_mux_driver);
	register_parisc_driver(&serial_mux_driver);

	if(port_cnt > 0) {
		/* Start the Mux timer */
		init_timer(&mux_timer);
		mux_timer.function = mux_poll;
		mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);

#ifdef CONFIG_SERIAL_MUX_CONSOLE
		register_console(&mux_console);
#endif
	}

	return 0;
}
/**
 * mux_exit - Serial MUX cleanup procedure.
 *
 * Unregister the Serial MUX driver from the tty layer.
 *
 * NOTE(review): uart_unregister_driver() is called unconditionally,
 * but mux_probe() only registered the driver when at least one port
 * was found — confirm this is harmless when port_cnt == 0.
 */
static void __exit mux_exit(void)
{
	/* Delete the Mux timer. */
	if(port_cnt > 0) {
		del_timer(&mux_timer);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
		unregister_console(&mux_console);
#endif
	}

	unregister_parisc_driver(&builtin_serial_mux_driver);
	unregister_parisc_driver(&serial_mux_driver);
	uart_unregister_driver(&mux_driver);
}
module_init(mux_init);
module_exit(mux_exit);
MODULE_AUTHOR("Ryan Bradetich");
MODULE_DESCRIPTION("Serial MUX driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(MUX_MAJOR);
| gpl-2.0 |
yvxiang/linux-zswap | sound/synth/emux/emux.c | 8563 | 4329 | /*
* Copyright (C) 2000 Takashi Iwai <tiwai@suse.de>
*
* Routines for control of EMU WaveTable chip
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/emux_synth.h>
#include <linux/init.h>
#include <linux/module.h>
#include "emux_voice.h"
MODULE_AUTHOR("Takashi Iwai");
MODULE_DESCRIPTION("Routines for control of EMU WaveTable chip");
MODULE_LICENSE("GPL");
/*
* create a new hardware dependent device for Emu8000/Emu10k1
*/
/*
 * snd_emux_new - allocate and zero-initialize a snd_emux object.
 * @remu: output pointer; set to the new object on success, NULL first.
 *
 * Returns 0, or -ENOMEM if allocation fails.  The caller fills in the
 * hardware fields and then calls snd_emux_register().
 */
int snd_emux_new(struct snd_emux **remu)
{
	struct snd_emux *emu;

	*remu = NULL;
	emu = kzalloc(sizeof(*emu), GFP_KERNEL);
	if (emu == NULL)
		return -ENOMEM;

	spin_lock_init(&emu->voice_lock);
	mutex_init(&emu->register_mutex);

	/* -1 = no sequencer client allocated yet */
	emu->client = -1;
#ifdef CONFIG_SND_SEQUENCER_OSS
	emu->oss_synth = NULL;
#endif
	emu->max_voices = 0;
	emu->use_time = 0;

	/* Timer is prepared here but only armed later (timer_active = 0). */
	init_timer(&emu->tlist);
	emu->tlist.function = snd_emux_timer_callback;
	emu->tlist.data = (unsigned long)emu;
	emu->timer_active = 0;

	*remu = emu;
	return 0;
}

EXPORT_SYMBOL(snd_emux_new);
/*
*/
/* Soundfont callback: forward sample creation to the chip's ops hook. */
static int sf_sample_new(void *private_data, struct snd_sf_sample *sp,
			 struct snd_util_memhdr *hdr,
			 const void __user *buf, long count)
{
	struct snd_emux *synth = private_data;
	int result;

	result = synth->ops.sample_new(synth, sp, hdr, buf, count);
	return result;
}
/* Soundfont callback: forward sample release to the chip's ops hook. */
static int sf_sample_free(void *private_data, struct snd_sf_sample *sp,
			  struct snd_util_memhdr *hdr)
{
	struct snd_emux *synth = private_data;
	int result;

	result = synth->ops.sample_free(synth, sp, hdr);
	return result;
}
/* Soundfont callback: forward sample reset to the chip's ops hook. */
static void sf_sample_reset(void *private_data)
{
	struct snd_emux *synth = private_data;

	synth->ops.sample_reset(synth);
}
/*
 * snd_emux_register - attach a snd_emux object to a sound card.
 * @emu:   synth object created by snd_emux_new(); hw/max_voices set.
 * @card:  the sound card to attach to.
 * @index: device index used for the sequencer/proc entries.
 * @name:  synth name; duplicated internally.
 *
 * Returns 0 on success or a negative error code.
 */
int snd_emux_register(struct snd_emux *emu, struct snd_card *card, int index, char *name)
{
	int err;
	struct snd_sf_callback sf_cb;

	if (snd_BUG_ON(!emu->hw || emu->max_voices <= 0))
		return -EINVAL;
	if (snd_BUG_ON(!card || !name))
		return -EINVAL;

	emu->card = card;
	emu->name = kstrdup(name, GFP_KERNEL);
	/*
	 * Fix: the original never checked the kstrdup() result, so an
	 * allocation failure left emu->name NULL for later users.
	 */
	if (!emu->name)
		return -ENOMEM;
	emu->voices = kcalloc(emu->max_voices, sizeof(struct snd_emux_voice),
			      GFP_KERNEL);
	if (emu->voices == NULL)
		return -ENOMEM;

	/* create soundfont list */
	memset(&sf_cb, 0, sizeof(sf_cb));
	sf_cb.private_data = emu;
	/* Only install the callbacks the chip actually implements. */
	if (emu->ops.sample_new)
		sf_cb.sample_new = sf_sample_new;
	if (emu->ops.sample_free)
		sf_cb.sample_free = sf_sample_free;
	if (emu->ops.sample_reset)
		sf_cb.sample_reset = sf_sample_reset;
	emu->sflist = snd_sf_new(&sf_cb, emu->memhdr);
	if (emu->sflist == NULL)
		return -ENOMEM;

	if ((err = snd_emux_init_hwdep(emu)) < 0)
		return err;

	snd_emux_init_voices(emu);

	snd_emux_init_seq(emu, card, index);
#ifdef CONFIG_SND_SEQUENCER_OSS
	snd_emux_init_seq_oss(emu);
#endif
	snd_emux_init_virmidi(emu, card);

#ifdef CONFIG_PROC_FS
	snd_emux_proc_init(emu, card, index);
#endif
	return 0;
}

EXPORT_SYMBOL(snd_emux_register);
/*
*/
/*
 * snd_emux_free - tear down and free a snd_emux object.
 *
 * Stops the pending timer under the voice lock, detaches all
 * registered interfaces (proc, virmidi, OSS/native sequencer, hwdep),
 * frees the soundfont list and finally the object itself.
 */
int snd_emux_free(struct snd_emux *emu)
{
	unsigned long flags;

	if (! emu)
		return -EINVAL;

	/* Cancel the timer while holding the lock that arms it. */
	spin_lock_irqsave(&emu->voice_lock, flags);
	if (emu->timer_active)
		del_timer(&emu->tlist);
	spin_unlock_irqrestore(&emu->voice_lock, flags);

#ifdef CONFIG_PROC_FS
	snd_emux_proc_free(emu);
#endif
	snd_emux_delete_virmidi(emu);
#ifdef CONFIG_SND_SEQUENCER_OSS
	snd_emux_detach_seq_oss(emu);
#endif
	snd_emux_detach_seq(emu);

	snd_emux_delete_hwdep(emu);

	if (emu->sflist)
		snd_sf_free(emu->sflist);

	kfree(emu->voices);
	kfree(emu->name);
	kfree(emu);
	return 0;
}

EXPORT_SYMBOL(snd_emux_free);
/*
* INIT part
*/
/* Module entry points: nothing to do; the library is driven entirely
 * through its exported symbols. */
static int __init alsa_emux_init(void)
{
	return 0;
}

static void __exit alsa_emux_exit(void)
{
}
module_init(alsa_emux_init)
module_exit(alsa_emux_exit)
| gpl-2.0 |
TeamGlade-Devices/android_kernel_htc_pico | arch/mips/sibyte/swarm/setup.c | 10099 | 4272 | /*
* Copyright (C) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* Setup code for the SWARM board
*/
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/screen_info.h>
#include <linux/initrd.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <asm/traps.h>
#include <asm/sibyte/sb1250.h>
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#else
#error invalid SiByte board configuration
#endif
#include <asm/sibyte/sb1250_genbus.h>
#include <asm/sibyte/board.h>
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
extern void bcm1480_setup(void);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
extern void sb1250_setup(void);
#else
#error invalid SiByte board configuration
#endif
extern int xicor_probe(void);
extern int xicor_set_time(unsigned long);
extern unsigned long xicor_get_time(void);
extern int m41t81_probe(void);
extern int m41t81_set_time(unsigned long);
extern unsigned long m41t81_get_time(void);
/* Reported in /proc/cpuinfo; SIBYTE_BOARD_NAME comes from board.h. */
const char *get_system_type(void)
{
	return "SiByte " SIBYTE_BOARD_NAME;
}
/*
 * Bus-error handler: on a data bus error (cause bit 2 set) dump the
 * faulting physical address from CP0 reg 26 sel 1, then let fixups
 * proceed or declare the error fatal.
 */
int swarm_be_handler(struct pt_regs *regs, int is_fixup)
{
	if (!is_fixup && (regs->cp0_cause & 4)) {
		/* Data bus error - print PA */
		printk("DBE physical address: %010Lx\n",
		       __read_64bit_c0_register($26, 1));
	}
	return (is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL);
}
enum swarm_rtc_type {
RTC_NONE,
RTC_XICOR,
RTC_M41T81,
};
enum swarm_rtc_type swarm_rtc_type;
/*
 * Read wall-clock time from whichever RTC was detected at setup;
 * with no RTC, fall back to a fixed epoch of 2000-01-01 00:00:00.
 */
void read_persistent_clock(struct timespec *ts)
{
	unsigned long sec;

	if (swarm_rtc_type == RTC_XICOR)
		sec = xicor_get_time();
	else if (swarm_rtc_type == RTC_M41T81)
		sec = m41t81_get_time();
	else
		sec = mktime(2000, 1, 1, 0, 0, 0);

	ts->tv_sec = sec;
	ts->tv_nsec = 0;
}
/*
 * Write wall-clock time to the detected RTC; returns the RTC driver's
 * status, or -1 when no RTC is present.
 */
int rtc_mips_set_time(unsigned long sec)
{
	if (swarm_rtc_type == RTC_XICOR)
		return xicor_set_time(sec);
	if (swarm_rtc_type == RTC_M41T81)
		return m41t81_set_time(sec);

	return -1;
}
/*
 * Board setup: run the SoC-specific init, install the bus-error
 * handler, probe for an RTC chip and (with CONFIG_VT) seed the
 * console screen_info.
 */
void __init plat_mem_setup(void)
{
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
	bcm1480_setup();
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
	sb1250_setup();
#else
#error invalid SiByte board configuration
#endif

	panic_timeout = 5;  /* For debug.  */

	board_be_handler = swarm_be_handler;

	/* M41T81 takes precedence if both probes succeed. */
	if (xicor_probe())
		swarm_rtc_type = RTC_XICOR;
	if (m41t81_probe())
		swarm_rtc_type = RTC_M41T81;

#ifdef CONFIG_VT
	screen_info = (struct screen_info) {
		.orig_video_page	= 52,
		.orig_video_mode	= 3,
		.orig_video_cols	= 80,
		.flags			= 12,
		.orig_video_ega_bx	= 3,
		.orig_video_lines	= 25,
		.orig_video_isVGA	= 0x22,
		.orig_video_points	= 16,
       };
	/* XXXKW for CFE, get lines/cols from environment */
#endif
}
#ifdef LEDS_PHYS
#ifdef CONFIG_SIBYTE_CARMEL
/* XXXKW need to detect Monterey/LittleSur/etc */
#undef LEDS_PHYS
#define LEDS_PHYS MLEDS_PHYS
#endif
/*
 * setleds - display up to four characters on the board LED bank.
 * @str: NUL-terminated string; characters past the terminator (or a
 *       NULL pointer) are shown as spaces.
 *
 * Fix: the original indexed str[i] for all four positions regardless
 * of the terminator, reading past the end of strings shorter than
 * four characters.  This version stops advancing at the NUL and pads
 * the remaining LEDs with spaces; output for 4-char strings is
 * unchanged.
 */
void setleds(char *str)
{
	void *reg;
	int i;

	for (i = 0; i < 4; i++) {
		/* LEDs are laid out right-to-left, 8 bytes apart. */
		reg = IOADDR(LEDS_PHYS) + 0x20 + ((3 - i) << 3);

		if (!str || !*str) {
			writeb(' ', reg);
		} else {
			writeb(*str, reg);
			str++;
		}
	}
}
| gpl-2.0 |
bas-t/media_tree | drivers/staging/iio/meter/ade7758_ring.c | 116 | 4526 | /*
* ADE7758 Poly Phase Multifunction Energy Metering IC driver
*
* Copyright 2010-2011 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
#include "ade7758.h"
/**
 * ade7758_spi_read_burst() - read data registers
 * @indio_dev: the IIO device
 *
 * Fires the pre-built SPI ring message; logs on failure and returns
 * the spi_sync() status.
 **/
static int ade7758_spi_read_burst(struct iio_dev *indio_dev)
{
	struct ade7758_state *st = iio_priv(indio_dev);
	int status;

	status = spi_sync(st->us, &st->ring_msg);
	if (status)
		dev_err(&st->us->dev, "problem when reading WFORM value\n");

	return status;
}
/*
 * Select which waveform source appears in the WFORM register by
 * replacing the low 5 bits of WAVMODE; the upper bits are preserved.
 */
static int ade7758_write_waveform_type(struct device *dev, unsigned int type)
{
	u8 mode;
	int status;

	status = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &mode);
	if (status)
		return status;

	mode = (mode & ~0x1F) | (type & 0x1F);

	return ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, mode);
}
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
 * specific to be rolled into the core.
 */
static irqreturn_t ade7758_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ade7758_state *st = iio_priv(indio_dev);
	/* dat64[0] holds the 24-bit sample; dat64[1] is overwritten with
	 * the timestamp by iio_push_to_buffers_with_timestamp(). */
	s64 dat64[2];
	u32 *dat32 = (u32 *)dat64;

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		if (ade7758_spi_read_burst(indio_dev) >= 0)
			*dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;

	/* NOTE(review): the push happens even when the burst read failed
	 * or no channel is active, so stale/uninitialized stack data can
	 * be queued in those cases — confirm whether this is intended. */
	iio_push_to_buffers_with_timestamp(indio_dev, dat64, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
/**
 * ade7758_ring_preenable() setup the parameters of the ring before enabling
 *
 * The complex nature of the setting of the number of bytes per datum is due
 * to this driver currently ensuring that the timestamp is stored at an 8
 * byte boundary.
 */
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
	unsigned int channel;

	/* Exactly one channel must be active (enforced by the onehot
	 * validate_scan_mask in the setup ops); refuse an empty mask. */
	if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		return -EINVAL;

	channel = find_first_bit(indio_dev->active_scan_mask,
				 indio_dev->masklength);

	/* Route the selected channel's waveform into the WFORM register. */
	ade7758_write_waveform_type(&indio_dev->dev,
				    indio_dev->channels[channel].address);

	return 0;
}
static const struct iio_buffer_setup_ops ade7758_ring_setup_ops = {
.preenable = &ade7758_ring_preenable,
.postenable = &iio_triggered_buffer_postenable,
.predisable = &iio_triggered_buffer_predisable,
.validate_scan_mask = &iio_validate_scan_mask_onehot,
};
/* Undo ade7758_configure_ring(): free the poll func, then the kfifo. */
void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}
/*
 * ade7758_configure_ring - set up the triggered-buffer capture path.
 *
 * Allocates the kfifo buffer and poll function, then pre-builds the
 * 4-transfer SPI message reused on every trigger:
 *   xfer 0/1: write RSTATUS read command, read 3 status bytes
 *   xfer 2/3: write WFORM read command, read the 3 waveform bytes
 *             (landing in rx_buf[5..7], consumed by the trigger handler)
 * Returns 0 or a negative errno.
 */
int ade7758_configure_ring(struct iio_dev *indio_dev)
{
	struct ade7758_state *st = iio_priv(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;

	buffer = iio_kfifo_allocate();
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);

	indio_dev->setup_ops = &ade7758_ring_setup_ops;

	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &ade7758_trigger_handler,
						 0,
						 indio_dev,
						 "ade7759_consumer%d",
						 indio_dev->id);
	if (!indio_dev->pollfunc) {
		ret = -ENOMEM;
		goto error_iio_kfifo_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
	st->tx_buf[1] = 0;
	st->tx_buf[2] = 0;
	st->tx_buf[3] = 0;
	st->tx_buf[4] = ADE7758_READ_REG(ADE7758_WFORM);
	st->tx_buf[5] = 0;
	st->tx_buf[6] = 0;
	st->tx_buf[7] = 0;

	/* build spi ring message */
	st->ring_xfer[0].tx_buf = &st->tx_buf[0];
	st->ring_xfer[0].len = 1;
	st->ring_xfer[0].bits_per_word = 8;
	st->ring_xfer[0].delay_usecs = 4;
	st->ring_xfer[1].rx_buf = &st->rx_buf[1];
	st->ring_xfer[1].len = 3;
	st->ring_xfer[1].bits_per_word = 8;
	st->ring_xfer[1].cs_change = 1;

	st->ring_xfer[2].tx_buf = &st->tx_buf[4];
	st->ring_xfer[2].len = 1;
	st->ring_xfer[2].bits_per_word = 8;
	st->ring_xfer[2].delay_usecs = 1;
	st->ring_xfer[3].rx_buf = &st->rx_buf[5];
	st->ring_xfer[3].len = 3;
	st->ring_xfer[3].bits_per_word = 8;

	spi_message_init(&st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[2], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[3], &st->ring_msg);

	return 0;

error_iio_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
	return ret;
}
| gpl-2.0 |
Agontuk/android_kernel_sony_u8500 | drivers/md/dm-snap-persistent.c | 372 | 20977 | /*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
*
* This file is released under the GPL.
*/
#include "dm-exception-store.h"
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
/*-----------------------------------------------------------------
* Persistent snapshots, by persistent we mean that the snapshot
* will survive a reboot.
*---------------------------------------------------------------*/
/*
* We need to store a record of which parts of the origin have
* been copied to the snapshot device. The snapshot code
* requires that we copy exception chunks to chunk aligned areas
* of the COW store. It makes sense therefore, to store the
* metadata in chunk size blocks.
*
* There is no backward or forward compatibility implemented,
* snapshots with different disk versions than the kernel will
* not be usable. It is expected that "lvcreate" will blank out
* the start of a fresh COW device before calling the snapshot
* constructor.
*
* The first chunk of the COW device just contains the header.
* After this there is a chunk filled with exception metadata,
* followed by as many exception chunks as can fit in the
* metadata areas.
*
* All on disk structures are in little-endian format. The end
* of the exceptions info is indicated by an exception with a
* new_chunk of 0, which is invalid since it would point to the
* header chunk.
*/
/*
* Magic for persistent snapshots: "SnAp" - Feeble isn't it.
*/
#define SNAP_MAGIC 0x70416e53
/*
* The on-disk version of the metadata.
*/
#define SNAPSHOT_DISK_VERSION 1
#define NUM_SNAPSHOT_HDR_CHUNKS 1
struct disk_header {
uint32_t magic;
/*
* Is this snapshot valid. There is no way of recovering
* an invalid snapshot.
*/
uint32_t valid;
/*
* Simple, incrementing version. no backward
* compatibility.
*/
uint32_t version;
/* In sectors */
uint32_t chunk_size;
};
/*
 * On-disk layout of one exception entry.  Per the format description
 * above, all on-disk values are little-endian and new_chunk == 0 marks
 * the end of the entries in an area (chunk 0 holds the header, so it
 * can never be a valid new_chunk).
 */
struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};
struct commit_callback {
void (*callback)(void *, int success);
void *context;
};
/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;	/* owning generic store */
	int version;		/* on-disk format version (SNAPSHOT_DISK_VERSION) */
	int valid;		/* cleared once the snapshot is invalidated */
	uint32_t exceptions_per_area;	/* disk_exceptions per metadata chunk */

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it wont hurt to have a
	 * whole chunks worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for header. The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */

	chunk_t next_free;

	/*
	 * The index of next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;		/* exceptions prepared but not yet committed */
	uint32_t callback_count;	/* queued entries in 'callbacks' */
	struct commit_callback *callbacks;	/* one slot per exception in an area */
	struct dm_io_client *io_client;		/* dm-io handle used by chunk_io() */

	struct workqueue_struct *metadata_wq;	/* serialises metadata dm_io */
};
static int alloc_area(struct pstore *ps)
{
int r = -ENOMEM;
size_t len;
len = ps->store->chunk_size << SECTOR_SHIFT;
/*
* Allocate the chunk_size block of memory that will hold
* a single metadata area.
*/
ps->area = vmalloc(len);
if (!ps->area)
goto err_area;
ps->zero_area = vmalloc(len);
if (!ps->zero_area)
goto err_zero_area;
memset(ps->zero_area, 0, len);
ps->header_area = vmalloc(len);
if (!ps->header_area)
goto err_header_area;
return 0;
err_header_area:
vfree(ps->zero_area);
err_zero_area:
vfree(ps->area);
err_area:
return r;
}
/*
 * Release all three working buffers and reset the pointers so the
 * function is safe to call repeatedly.  vfree(NULL) is a no-op, so the
 * original per-pointer guards were redundant.
 */
static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;

	vfree(ps->zero_area);
	ps->zero_area = NULL;

	vfree(ps->header_area);
	ps->header_area = NULL;
}
/*
 * Stack-allocated request handed to the metadata workqueue so that the
 * synchronous dm_io runs in worker context (see chunk_io()).
 */
struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;		/* dm_io() return value, read by the submitter */
};
/* Work item: issue the synchronous dm_io prepared by chunk_io(). */
static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}
/*
 * Read or write a chunk aligned and sized block of data from a device.
 *
 * When 'metadata' is non-zero the synchronous I/O is pushed through the
 * single-threaded metadata workqueue rather than issued directly from
 * the caller's context.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,		/* 'area' is vmalloc'd */
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,		/* NULL => synchronous dm_io */
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	/* 'req' lives on our stack: must wait for the worker to finish. */
	flush_workqueue(ps->metadata_wq);

	return req.result;
}
/*
 * Convert a metadata area index to a chunk index.  Areas repeat with a
 * stride of (exceptions_per_area data chunks + 1 metadata chunk), offset
 * past the header chunk(s).
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	chunk_t stride = ps->exceptions_per_area + 1;

	return NUM_SNAPSHOT_HDR_CHUNKS + stride * area;
}
/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 *
 * Simplified: the old body stored chunk_io()'s result, tested it, and
 * then returned 0 anyway — equivalent to returning the result directly.
 */
static int area_io(struct pstore *ps, int rw)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, rw, 0);
}
static void zero_memory_area(struct pstore *ps)
{
memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}
/* Overwrite metadata area 'area' on disk with the pre-zeroed buffer. */
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	chunk_t chunk = area_location(ps, area);

	return chunk_io(ps, ps->zero_area, chunk, WRITE, 0);
}
/*
 * Read and validate the on-disk header (chunk 0 of the COW device).
 *
 * Sets *new_snapshot to 1 when the COW device is blank (magic == 0) and
 * a fresh header must be written, 0 otherwise.  Also creates the dm-io
 * client, allocates the working buffers, and may replace the
 * table-supplied chunk size with the one recorded in the metadata.
 */
static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	/* Zero magic: "lvcreate" blanked the device — brand new snapshot. */
	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	/* The metadata's chunk size wins over the one from the table. */
	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	/* Re-allocate the buffers at the corrected chunk size. */
	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}
/*
 * Build the on-disk header in the dedicated header buffer and write it
 * to chunk 0 via the metadata workqueue.
 */
static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}
/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */

/* Return a pointer to slot 'index' within the in-core metadata area. */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}
/* Copy exception 'index' out of the area, converting LE -> CPU order. */
static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	const struct disk_exception *de = get_exception(ps, index);

	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}
/* Copy exception '*de' into slot 'index', converting CPU -> LE order. */
static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *dest = get_exception(ps, index);

	dest->old_chunk = cpu_to_le64(de->old_chunk);
	dest->new_chunk = cpu_to_le64(de->new_chunk);
}
/*
 * Zero slot 'index'.  A zero new_chunk is the end-of-exceptions
 * sentinel recognised by insert_exceptions().
 */
static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, index);

	de->old_chunk = 0;
	de->new_chunk = 0;
}
/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 *
 * Also advances ps->next_free past every chunk seen, and records in
 * ps->current_committed how many slots of a partial area are in use.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}
/*
 * Load every committed exception from disk, handing each one to
 * 'callback'.  Leaves ps->current_area pointing at the first
 * not-completely-full area.
 */
static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keeping reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	/* The loop increment overshoots by one; step back to the partial area. */
	ps->current_area--;

	return 0;
}
/* Fetch the pstore hidden in the generic exception-store context. */
static struct pstore *get_info(struct dm_exception_store *store)
{
	return store->context;
}
/*
 * Report COW device usage: total device size, sectors consumed by data
 * exceptions, and sectors consumed by header + metadata chunks.
 */
static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}
/*
 * Tear down a persistent store: stop the metadata workqueue, release
 * the dm-io client, working buffers, callback array and the pstore
 * itself.
 */
static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);

	free_area(ps);

	/* Allocated in persistent_read_metadata; vfree(NULL) is a no-op. */
	vfree(ps->callbacks);

	kfree(ps);
}
/*
 * Bring the store on-line: read/validate the header, size the
 * per-area bookkeeping, and either initialise a brand-new snapshot or
 * replay the existing exceptions through 'callback'.
 *
 * Returns 0 on success, 1 when the metadata is readable but the
 * snapshot has been invalidated, or a negative errno.
 */
static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to setup a new snapshot ?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}
	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata are valid, but snapshot is invalidated
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}
/*
 * Reserve the next free COW chunk for exception 'e'.
 * Returns -ENOSPC once the COW device is exhausted.
 */
static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room ? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	/* Offset 1 within a stride is a metadata chunk — skip over it. */
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}
/*
 * Commit exception 'e' to the in-core metadata area and queue
 * 'callback' to be run once the area has been flushed to disk.  The
 * flush happens when the last in-flight exception completes or the
 * area fills up, so callbacks are batched.
 */
static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	/* Run every queued callback; ps->valid reports overall success. */
	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
/*
 * Find, working backwards from the most recently committed exception,
 * a run of consecutively-numbered exceptions that can be merged back
 * into the origin in one go.  Returns the run length (0 when there is
 * nothing left to merge) or a negative errno, and fills in the
 * old/new chunk numbers of the run's newest member.
 */
static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	int nr_consecutive;
	int r;

	/*
	 * When current area is empty, move back to preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->current_committed - 1, &de);
	*last_old_chunk = de.old_chunk;
	*last_new_chunk = de.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
			       &de);
		if (de.old_chunk != *last_old_chunk - nr_consecutive ||
		    de.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}
/*
 * Remove the last 'nr_merged' exceptions from the current area (they
 * have been merged back into the origin) and flush the area to disk.
 */
static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE_FLUSH_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}
/* Mark the snapshot invalid and persist that fact to the header at once. */
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);
	int r;

	ps->valid = 0;
	r = write_header(ps);
	if (r)
		DMWARN("write header failed");
}
/*
 * Constructor: allocate and initialise the pstore and its metadata
 * workqueue.  kzalloc() already zeroes the buffer pointers, counters
 * and 'callbacks', so only the non-zero fields are set explicitly
 * (the old per-field re-zeroing was redundant).
 */
static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	atomic_set(&ps->pending_count, 0);

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}
/*
 * Emit status output for dmsetup.  Only the TABLE form carries data:
 * the store-type letter 'P' and the chunk size.  Returns the number of
 * characters written (DMEMIT appends into 'result' and advances 'sz').
 */
static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}
/* Exception-store type registered under the modern name "persistent". */
static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};
/* Same store registered under the legacy single-letter table name "P". */
static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};
/*
 * Register both the modern ("persistent") and legacy ("P") store
 * types; the modern registration is rolled back if the legacy one
 * fails.
 */
int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
	}

	return r;
}
/* Unregister both store types (independent calls, reverse of init order). */
void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_compat_type);
	dm_exception_store_type_unregister(&_persistent_type);
}
| gpl-2.0 |
Pauliecoon/android_kernel_motorola_shamu_benzoCore | lib/spinlock_debug.c | 372 | 7353 | /*
* Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*
* This file contains the spinlock/rwlock implementations for
* DEBUG_SPINLOCK.
*/
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
/*
 * Initialise a debug raw spinlock: unlocked state, magic cookie for
 * corruption detection, and no owner recorded yet.
 */
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);
/*
 * Initialise a debug rwlock: unlocked state, magic cookie for
 * corruption detection, and no (write) owner recorded yet.
 */
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
/*
 * Print full diagnostics for a suspected spinlock problem: the message,
 * the current task, and the lock's magic/owner/owner_cpu fields.
 */
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
	/*
	 * NOTE(review): non-upstream vendor addition — panics when
	 * PANIC_CORRUPTION is set; confirm against the config it gates on.
	 */
	BUG_ON(PANIC_CORRUPTION);
	dump_stack();
}
/* Report a spinlock bug once; silent after lock debugging is disabled. */
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (debug_locks_off())
		spin_dump(lock, msg);
}
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
/* Sanity checks before acquiring: valid magic, no self/cpu recursion. */
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/* Record the new owner (task + CPU) for the unlock-time checks. */
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner = current;
	lock->owner_cpu = raw_smp_processor_id();
}
/* Validate ownership on release, then clear the owner bookkeeping. */
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
/*
 * Slow path: spin with trylock for roughly one second's worth of loops;
 * if the lock still cannot be taken, report a suspected lockup and then
 * fall back to the arch lock so forward progress is still possible.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}
/* Debug spinlock acquire: pre-checks, fast trylock, slow path, bookkeeping. */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
/* Debug trylock: record ownership on success; a UP failure is a bug. */
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/* Debug spinlock release: validate ownership before the real unlock. */
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
/* Report an rwlock bug once; silent after lock debugging is disabled. */
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (debug_locks_off()) {
		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
			msg, raw_smp_processor_id(), current->comm,
			task_pid_nr(current), lock);
		dump_stack();
	}
}
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
/*
 * Dead code (compiled out by the surrounding #if 0): a trylock-spin
 * read-lock slow path that prints a one-time lockup warning.  Kept for
 * reference; see the comment on the guard about potential lockups.
 */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
/* Debug read-lock acquire: magic check only (readers record no owner). */
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}
/* Debug read-trylock: a failure on UP indicates a bug. */
int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/* Debug read-lock release: magic check, then the real unlock. */
void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}
/* Sanity checks before write-locking: valid magic, no self/cpu recursion. */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
/* Record the new write owner (task + CPU) for the unlock-time checks. */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner = current;
	lock->owner_cpu = raw_smp_processor_id();
}
/* Validate write ownership on release, then clear the owner bookkeeping. */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
#if 0 /* This can cause lockups */
/*
 * Dead code (compiled out by the surrounding #if 0): a trylock-spin
 * write-lock slow path with a one-time lockup warning.  Kept for
 * reference; the guard comment notes it can cause lockups.
 */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
/* Debug write-lock acquire: pre-checks, real lock, ownership bookkeeping. */
void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}
/* Debug write-trylock: record ownership on success; UP failure is a bug. */
int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
/* Debug write-lock release: validate ownership before the real unlock. */
void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}
| gpl-2.0 |
WaRP7/linux-fslc | drivers/edac/mv64x60_edac.c | 884 | 22972 | /*
* Marvell MV64x60 Memory Controller kernel module for PPC platforms
*
* Author: Dave Jiang <djiang@mvista.com>
*
* 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/gfp.h>
#include "edac_core.h"
#include "edac_module.h"
#include "mv64x60_edac.h"
static const char *mv64x60_ctl_name = "MV64x60";
static int edac_dev_idx;
static int edac_pci_idx;
static int edac_mc_idx;
/*********************** PCI err device **********************************/
#ifdef CONFIG_PCI
/*
 * Poll handler: read the PCI error cause register, dump the capture
 * registers if anything is latched, acknowledge the errors and notify
 * the EDAC core (parity errors as PE, everything else as NPE).
 */
static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);

	if (!cause)
		return;

	printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
	printk(KERN_ERR "Attribute: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
	printk(KERN_ERR "Command: 0x%08x\n",
	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));

	/* Acknowledge exactly the errors we just reported. */
	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);

	if (cause & MV64X60_PCI_PE_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);
	else
		edac_pci_handle_npe(pci, pci->ctl_name);
}
/*
 * Interrupt handler: claim the IRQ only if the cause register shows a
 * latched error, then delegate to the common check routine.
 */
static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 val;

	val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!val)
		return IRQ_NONE;

	mv64x60_pci_check(pci);

	return IRQ_HANDLED;
}
/*
* Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
* errata FEr-#11 and FEr-##16 for the 64460, it should be 0 on that chip as
* well. IOW, don't set bit 0.
*/
/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
static int __init mv64x60_pci_fixup(struct platform_device *pdev)
{
struct resource *r;
void __iomem *pci_serr;
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!r) {
printk(KERN_ERR "%s: Unable to get resource for "
"PCI err regs\n", __func__);
return -ENOENT;
}
pci_serr = ioremap(r->start, resource_size(r));
if (!pci_serr)
return -ENOMEM;
out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
iounmap(pci_serr);
return 0;
}
static int mv64x60_pci_err_probe(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci;
struct mv64x60_pci_pdata *pdata;
struct resource *r;
int res = 0;
if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
return -ENOMEM;
pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
if (!pci)
return -ENOMEM;
pdata = pci->pvt_info;
pdata->pci_hose = pdev->id;
pdata->name = "mpc85xx_pci_err";
pdata->irq = NO_IRQ;
platform_set_drvdata(pdev, pci);
pci->dev = &pdev->dev;
pci->dev_name = dev_name(&pdev->dev);
pci->mod_name = EDAC_MOD_STR;
pci->ctl_name = pdata->name;
if (edac_op_state == EDAC_OPSTATE_POLL)
pci->edac_check = mv64x60_pci_check;
pdata->edac_idx = edac_pci_idx++;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
printk(KERN_ERR "%s: Unable to get resource for "
"PCI err regs\n", __func__);
res = -ENOENT;
goto err;
}
if (!devm_request_mem_region(&pdev->dev,
r->start,
resource_size(r),
pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
res = -EBUSY;
goto err;
}
pdata->pci_vbase = devm_ioremap(&pdev->dev,
r->start,
resource_size(r));
if (!pdata->pci_vbase) {
printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
res = -ENOMEM;
goto err;
}
res = mv64x60_pci_fixup(pdev);
if (res < 0) {
printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
goto err;
}
out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
MV64X60_PCIx_ERR_MASK_VAL);
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
edac_dbg(3, "failed edac_pci_add_device()\n");
goto err;
}
if (edac_op_state == EDAC_OPSTATE_INT) {
pdata->irq = platform_get_irq(pdev, 0);
res = devm_request_irq(&pdev->dev,
pdata->irq,
mv64x60_pci_isr,
0,
"[EDAC] PCI err",
pci);
if (res < 0) {
printk(KERN_ERR "%s: Unable to request irq %d for "
"MV64x60 PCI ERR\n", __func__, pdata->irq);
res = -ENODEV;
goto err2;
}
printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
pdata->irq);
}
devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
/* get this far and it's successful */
edac_dbg(3, "success\n");
return 0;
err2:
edac_pci_del_device(&pdev->dev);
err:
edac_pci_free_ctl_info(pci);
devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
return res;
}
/* Unregister from the EDAC core and free the control structure. */
static int mv64x60_pci_err_remove(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_pci_del_device(&pdev->dev);

	edac_pci_free_ctl_info(pci);

	return 0;
}
/* Platform driver binding for the PCI error-reporting block. */
static struct platform_driver mv64x60_pci_err_driver = {
	.probe = mv64x60_pci_err_probe,
	.remove = mv64x60_pci_err_remove,
	.driver = {
		   .name = "mv64x60_pci_err",
	}
};
#endif /* CONFIG_PCI */
/*********************** SRAM err device **********************************/
/*
 * Poll handler for the internal SRAM: dump and acknowledge any latched
 * error, then report it to the EDAC core as uncorrectable.
 */
static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in internal SRAM\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));

	/* Acknowledge the latched error. */
	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
/*
 * Interrupt handler: claim the IRQ only when the SRAM cause register
 * shows a latched error, then delegate to the common check routine.
 */
static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return IRQ_NONE;

	mv64x60_sram_check(edac_dev);

	return IRQ_HANDLED;
}
/*
 * Probe the internal-SRAM error device: map its register window, clear
 * any stale error, register with the EDAC core and (in interrupt mode)
 * hook up the error IRQ.
 *
 * Fix: on edac_device_add_device() failure, 'res' was left at 0, so the
 * probe freed edac_dev yet returned success; the stale drvdata pointer
 * would then be double-freed by the remove routine.  We now return
 * -ENODEV on that path.
 */
static int mv64x60_sram_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct mv64x60_sram_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "sram", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_sram_err";
	pdata->irq = NO_IRQ;
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "SRAM err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while request mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->sram_vbase = devm_ioremap(&pdev->dev,
					 r->start,
					 resource_size(r));
	if (!pdata->sram_vbase) {
		printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
		       __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup SRAM err registers: clear any error latched before probe */
	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_sram_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;	/* was missing: probe returned 0 on failure */
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_sram_isr,
				       0,
				       "[EDAC] SRAM err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MV64x60 SRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
		       pdata->irq);
	}

	/* Success: dissolve the devres group so resources live with the dev */
	devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}
/* Unregister the SRAM error device and free its EDAC control info. */
static int mv64x60_sram_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;

	edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);

	return 0;
}
/* Platform-bus glue for the MV64x60 internal-SRAM error device. */
static struct platform_driver mv64x60_sram_err_driver = {
.probe = mv64x60_sram_err_probe,
.remove = mv64x60_sram_err_remove,
.driver = {
.name = "mv64x60_sram_err",
}
};
/*********************** CPU err device **********************************/
/*
 * Poll/report handler for the CPU interface error logic.
 *
 * The capture registers are split across two windows: cpu_vbase[0]
 * holds the address capture, cpu_vbase[1] the cause/data/parity
 * registers.  When the (masked) cause shows an error, dump the capture
 * registers, clear the cause and report an uncorrectable error.
 */
static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
{
struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
u32 cause;
/* Only the bits in MV64x60_CPU_CAUSE_MASK indicate real errors. */
cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
MV64x60_CPU_CAUSE_MASK;
if (!cause)
return;
printk(KERN_ERR "Error on CPU interface\n");
printk(KERN_ERR "Cause register: 0x%08x\n", cause);
printk(KERN_ERR "Address Low: 0x%08x\n",
in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
printk(KERN_ERR "Address High: 0x%08x\n",
in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
printk(KERN_ERR "Data Low: 0x%08x\n",
in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
printk(KERN_ERR "Data High: 0x%08x\n",
in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
printk(KERN_ERR "Parity: 0x%08x\n",
in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
/* Re-arm the error capture / ack the interrupt. */
out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
/*
 * CPU interface error interrupt handler: claim the interrupt only when
 * a masked error bit is latched, then let the common check routine do
 * the reporting and clearing.
 */
static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 pending;

	pending = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
		  MV64x60_CPU_CAUSE_MASK;
	if (!pending)
		return IRQ_NONE;

	mv64x60_cpu_check(edac_dev);

	return IRQ_HANDLED;
}
/*
 * Probe the CPU interface error device: map both register windows
 * (address capture and cause/data/parity), clear and unmask the error
 * logic, register with the EDAC core and optionally hook up the IRQ.
 *
 * Fix: on edac_device_add_device() failure, 'res' was left at 0, so the
 * probe freed edac_dev yet returned success (stale drvdata pointer,
 * later double-freed by remove).  We now return -ENODEV on that path.
 */
static int mv64x60_cpu_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct resource *r;
	struct mv64x60_cpu_pdata *pdata;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_cpu_err";
	pdata->irq = NO_IRQ;
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	/* First window: the error address capture registers. */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[0]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	/* Second window: cause/mask/data/parity registers. */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[1]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup CPU err registers: clear stale state, then unmask errors */
	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;
	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_cpu_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;	/* was missing: probe returned 0 on failure */
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_cpu_isr,
				       0,
				       "[EDAC] CPU err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for MV64x60 "
			       "CPU ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR
		       " acquired irq %d for CPU Err\n", pdata->irq);
	}

	/* Success: dissolve the devres group so resources live with the dev */
	devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}
/* Unregister the CPU error device and free its EDAC control info. */
static int mv64x60_cpu_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;

	edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);

	return 0;
}
/* Platform-bus glue for the MV64x60 CPU interface error device. */
static struct platform_driver mv64x60_cpu_err_driver = {
.probe = mv64x60_cpu_err_probe,
.remove = mv64x60_cpu_err_remove,
.driver = {
.name = "mv64x60_cpu_err",
}
};
/*********************** DRAM err device **********************************/
/*
 * Poll/report handler for the SDRAM controller ECC logic.
 *
 * A non-zero error-address register means an ECC event was latched.
 * The syndrome is derived by XORing the ECC read from SDRAM with the
 * ECC recomputed by the controller.  Bit 0 of the address register
 * distinguishes single-bit (correctable) from multi-bit events.
 */
static void mv64x60_mc_check(struct mem_ctl_info *mci)
{
struct mv64x60_mc_pdata *pdata = mci->pvt_info;
u32 reg;
u32 err_addr;
u32 sdram_ecc;
u32 comp_ecc;
u32 syndrome;
reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
if (!reg)
return;
/* Low two bits are status flags, not part of the address. */
err_addr = reg & ~0x3;
sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
syndrome = sdram_ecc ^ comp_ecc;
/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
if (!(reg & 0x1))
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & PAGE_MASK, syndrome,
0, 0, -1,
mci->ctl_name, "");
else /* 2 bit error, UE */
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & PAGE_MASK, 0,
0, 0, -1,
mci->ctl_name, "");
/* clear the error */
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
}
/*
 * SDRAM ECC interrupt handler.  A zero error-address register means the
 * interrupt is not ours; otherwise delegate to the common check routine,
 * whose write of 0 to the error-address register also acks the IRQ.
 */
static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;

	if (!in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR))
		return IRQ_NONE;

	/* writing 0's to the ECC err addr in check function clears irq */
	mv64x60_mc_check(mci);

	return IRQ_HANDLED;
}
/*
 * Read the total memory size from the device tree "memory" node.
 *
 * Fixes: the node reference returned by of_find_node_by_type() was
 * never dropped (of_node_put missing), and the of_get_property()
 * result was dereferenced without a NULL check.
 *
 * NOTE(review): reg[1] assumes one address cell and one size cell in
 * the "reg" property — true for these 32-bit platforms, but worth
 * confirming against the platform's device tree.
 */
static void get_total_mem(struct mv64x60_mc_pdata *pdata)
{
	struct device_node *np;
	const unsigned int *reg;

	np = of_find_node_by_type(NULL, "memory");
	if (!np)
		return;

	reg = of_get_property(np, "reg", NULL);
	if (reg)
		pdata->total_mem = reg[1];

	of_node_put(np);
}
/*
 * Describe the (single) chip-select row / channel to the EDAC core:
 * page count from the device tree memory size, DIMM geometry from the
 * SDRAM config register.
 */
static void mv64x60_init_csrows(struct mem_ctl_info *mci,
struct mv64x60_mc_pdata *pdata)
{
struct csrow_info *csrow;
struct dimm_info *dimm;
u32 devtype;
u32 ctl;
get_total_mem(pdata);
ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
/* This controller exposes exactly one csrow with one channel. */
csrow = mci->csrows[0];
dimm = csrow->channels[0]->dimm;
dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
dimm->grain = 8;
dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
/* Bits 21:20 of the config register encode the device width. */
devtype = (ctl >> 20) & 0x3;
switch (devtype) {
case 0x0:
dimm->dtype = DEV_X32;
break;
case 0x2: /* could be X8 too, but no way to tell */
dimm->dtype = DEV_X16;
break;
case 0x3:
dimm->dtype = DEV_X4;
break;
default:
dimm->dtype = DEV_UNKNOWN;
break;
}
dimm->edac_mode = EDAC_SECDED;
}
/*
 * Probe the SDRAM controller ECC device: map its registers, verify ECC
 * is actually enabled, describe the memory layout to the EDAC core and
 * optionally hook up the error IRQ.
 *
 * Fixes:
 *  - allocation-failure message said "CPU err" (copy-paste) — now "MC err";
 *  - the non-ECC bail-out jumped to err2, calling edac_mc_del_mc() for a
 *    controller that was never added — now jumps to err;
 *  - on success the devres group was never dissolved — now matched with
 *    the sibling SRAM/CPU probes via devres_remove_group().
 */
static int mv64x60_mc_err_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mv64x60_mc_pdata *pdata;
	struct resource *r;
	u32 ctl;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	/* One virtual csrow with a single channel. */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(struct mv64x60_mc_pdata));
	if (!mci) {
		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
		devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);
	pdata->name = "mv64x60_mc_err";
	pdata->irq = NO_IRQ;
	mci->dev_name = dev_name(&pdev->dev);
	pdata->edac_idx = edac_mc_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "MC err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&pdev->dev,
				       r->start,
				       resource_size(r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
	if (!(ctl & MV64X60_SDRAM_ECC)) {
		/* Non-ECC RAM?  Nothing to monitor; the MC was never added,
		 * so bail out through err, not err2. */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = MV64x60_REVISION;
	mci->ctl_name = mv64x60_ctl_name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mv64x60_mc_check;

	mci->ctl_page_to_phys = NULL;
	mci->scrub_mode = SCRUB_SW_SRC;

	mv64x60_init_csrows(mci, pdata);

	/* setup MC registers: clear stale errors, enable single-bit count */
	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
	ctl = (ctl & 0xff00ffff) | 0x10000;
	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);

	res = edac_mc_add_mc(mci);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		/* acquire interrupt that reports errors */
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_mc_isr,
				       0,
				       "[EDAC] MC err",
				       mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 DRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
		       pdata->irq);
	}

	/* Success: dissolve the devres group, matching the sibling probes */
	devres_remove_group(&pdev->dev, mv64x60_mc_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_mc_del_mc(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
	edac_mc_free(mci);
	return res;
}
/* Unregister the memory controller from EDAC and free its mci. */
static int mv64x60_mc_err_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;

	mci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}
/* Platform-bus glue for the MV64x60 SDRAM controller ECC device. */
static struct platform_driver mv64x60_mc_err_driver = {
.probe = mv64x60_mc_err_probe,
.remove = mv64x60_mc_err_remove,
.driver = {
.name = "mv64x60_mc_err",
}
};
/*
 * Register the platform drivers for each error source (MC, CPU, SRAM,
 * and PCI when enabled).  Registration failures are reported but are
 * deliberately not fatal: each error source is independent.
 *
 * Fix: the function used to return the status of the *last*
 * registration, so a single late failure (e.g. the PCI-err driver)
 * failed module init and freed the module while the successfully
 * registered drivers were still live — a use-after-free on the next
 * device probe.  Partial registration is acceptable, so return 0.
 */
static int __init mv64x60_edac_init(void)
{
	int ret = 0;

	printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
	printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	ret = platform_driver_register(&mv64x60_mc_err_driver);
	if (ret)
		printk(KERN_WARNING EDAC_MOD_STR "MC err failed to register\n");

	ret = platform_driver_register(&mv64x60_cpu_err_driver);
	if (ret)
		printk(KERN_WARNING EDAC_MOD_STR
		       "CPU err failed to register\n");

	ret = platform_driver_register(&mv64x60_sram_err_driver);
	if (ret)
		printk(KERN_WARNING EDAC_MOD_STR
		       "SRAM err failed to register\n");

#ifdef CONFIG_PCI
	ret = platform_driver_register(&mv64x60_pci_err_driver);
	if (ret)
		printk(KERN_WARNING EDAC_MOD_STR
		       "PCI err failed to register\n");
#endif

	/* Keep the module loaded even on partial failure (see above). */
	return 0;
}
module_init(mv64x60_edac_init);
/* Module unload: unregister every platform driver registered by init. */
static void __exit mv64x60_edac_exit(void)
{
#ifdef CONFIG_PCI
platform_driver_unregister(&mv64x60_pci_err_driver);
#endif
platform_driver_unregister(&mv64x60_sram_err_driver);
platform_driver_unregister(&mv64x60_cpu_err_driver);
platform_driver_unregister(&mv64x60_mc_err_driver);
}
module_exit(mv64x60_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
"EDAC Error Reporting state: 0=Poll, 2=Interrupt");
| gpl-2.0 |
spica234/HP-CM9-Kernel-32 | kernel/utsname.c | 1652 | 1689 | /*
* Copyright (C) 2004 IBM Corporation
*
* Author: Serge Hallyn <serue@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/err.h>
#include <linux/slab.h>
/*
 * Allocate a new uts namespace with its refcount initialized to 1.
 * The name fields are left for the caller to fill in.
 * Returns NULL on allocation failure.
 */
static struct uts_namespace *create_uts_ns(void)
{
	struct uts_namespace *ns;

	ns = kmalloc(sizeof(*ns), GFP_KERNEL);
	if (ns)
		kref_init(&ns->kref);
	return ns;
}
/*
* Clone a new ns copying an original utsname, setting refcount to 1
* @old_ns: namespace to clone
* Return NULL on error (failure to kmalloc), new ns otherwise
*/
static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns)
{
struct uts_namespace *ns;
ns = create_uts_ns();
if (!ns)
return ERR_PTR(-ENOMEM);
down_read(&uts_sem);
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
up_read(&uts_sem);
return ns;
}
/*
* Copy task tsk's utsname namespace, or clone it if flags
* specifies CLONE_NEWUTS. In latter case, changes to the
* utsname of this process won't be seen by parent, and vice
* versa.
*/
struct uts_namespace *copy_utsname(unsigned long flags, struct uts_namespace *old_ns)
{
	struct uts_namespace *new_ns;

	BUG_ON(!old_ns);

	/* Pin the old namespace while we decide what to do with it. */
	get_uts_ns(old_ns);

	if (flags & CLONE_NEWUTS) {
		/* New namespace requested: clone, then drop our pin. */
		new_ns = clone_uts_ns(old_ns);
		put_uts_ns(old_ns);
		return new_ns;
	}

	/* Share the parent's namespace; the reference we took is the
	 * child's. */
	return old_ns;
}
/* kref release callback: last reference gone, free the namespace. */
void free_uts_ns(struct kref *kref)
{
	kfree(container_of(kref, struct uts_namespace, kref));
}
| gpl-2.0 |
EPDCenter/android_kernel_rockchip_mk908 | arch/powerpc/mm/fault.c | 2164 | 12160 | /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Modified by Cort Dougan and Paul Mackerras.
*
* Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <linux/ratelimit.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>
/*
 * Give kprobes a chance to claim the fault.  Returns 1 when a kprobe
 * fault handler consumed it (do_page_fault then returns immediately),
 * 0 otherwise.  Compiles to a trivial stub when kprobes is disabled.
 */
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 11))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
return 0;
}
#endif
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
*/
/*
 * Decode the faulting instruction and return 1 if it is a store-with-
 * update through rA == r1 (the stack pointer), i.e. an instruction that
 * would legitimately grow the stack.  Used by do_page_fault() to decide
 * whether an access far below the current stack pointer is a valid
 * stack extension.  Returns 0 if the instruction can't be fetched.
 */
static int store_updates_sp(struct pt_regs *regs)
{
unsigned int inst;
if (get_user(inst, (unsigned int __user *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
return 0;
/* check major opcode */
switch (inst >> 26) {
case 37: /* stwu */
case 39: /* stbu */
case 45: /* sthu */
case 53: /* stfsu */
case 55: /* stfdu */
return 1;
case 62: /* std or stdu */
return (inst & 3) == 1;
case 31:
/* check minor opcode */
switch ((inst >> 1) & 0x3ff) {
case 181: /* stdux */
case 183: /* stwux */
case 247: /* stbux */
case 439: /* sthux */
case 695: /* stfsux */
case 759: /* stfdux */
return 1;
}
}
return 0;
}
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
* for a data fault, SRR1 for an instruction fault. For 400-family processors
* the error_code parameter is ESR for a data fault, 0 for an instruction
* fault.
* For 64-bit processors, the error_code parameter is
* - DSISR for a non-SLB data access fault,
* - SRR1 & 0x08000000 for a non-SLB instruction access fault
* - 0 any SLB fault.
*
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
int is_write = 0, ret;
int trap = TRAP(regs);
/* trap 0x400 is an instruction storage interrupt (exec fault) */
int is_exec = trap == 0x400;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
* Fortunately the bit assignments in SRR1 for an instruction
* fault and DSISR for a data fault are mostly the same for the
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (trap == 0x400)
error_code &= 0x48200000;
else
is_write = error_code & DSISR_ISSTORE;
#else
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
/* Let kprobes and the debugger claim the fault before handling it. */
if (notify_page_fault(regs))
return 0;
if (unlikely(debugger_fault_handler(regs)))
return 0;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
do_dabr(regs, address, error_code);
return 0;
}
#endif
/* Can't take the mmap_sem in atomic context or without an mm. */
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->nip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
/*
* N.B. The POWER/Open ABI allows programs to access up to
* 288 bytes below the stack pointer.
* The kernel signal delivery code writes up to about 1.5kB
* below the stack pointer (r1) before decrementing it.
* The exec code can write slightly over 640kB to the stack
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
goto bad_area;
/*
* A user-mode access to an address a long way below
* the stack pointer is only valid if the instruction
* is one which would update the stack pointer to the
* address accessed if the instruction completed,
* i.e. either stwu rs,n(r1) or stwux rs,r1,rb
* (or the byte, halfword, float or double forms).
*
* If we don't check this then any write to the area
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
if (address + 2048 < uregs->gpr[1]
&& (!user_mode(regs) || !store_updates_sp(regs)))
goto bad_area;
}
if (expand_stack(vma, address))
goto bad_area;
good_area:
/* From here on a failure is a permission problem, not a bad map. */
code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
if (error_code & 0x95700000)
/* an error such as lwarx to I/O controller space,
address matching DABR, eciwx, etc. */
goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
/* 8xx sometimes need to load a invalid/non-present TLBs.
* These must be invalidated separately as linux mm don't.
*/
if (error_code & 0x40000000) /* no translation? */
_tlbil_va(address, 0, 0, 0);
/* The MPC8xx seems to always set 0x80000000, which is
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
if (error_code & 0x10000000)
/* Guarded storage error. */
goto bad_area;
#endif /* CONFIG_8xx */
if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
/* Protection fault on exec go straight to failure on
* Hash based MMUs as they either don't support per-page
* execute permission, or if they do, it's handled already
* at the hash level. This test would probably have to
* be removed if we change the way this works to make hash
* processors use the same I/D cache coherency mechanism
* as embedded.
*/
if (error_code & DSISR_PROTFAULT)
goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
*
* Note: That code used to not be enabled for 4xx/BookE.
* It is now as I/D cache coherency for these is done at
* set_pte_at() time and I see no reason why the test
* below wouldn't be valid on those processors. This -may-
* break programs compiled with a really old ABI though.
*/
if (!(vma->vm_flags & VM_EXEC) &&
(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
/* a write */
} else if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* a read */
} else {
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
else if (ret & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
/* Account the fault as major or minor for perf and task stats. */
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
preempt_disable();
get_lppaca()->page_ins += (1 << PAGE_FACTOR);
preempt_enable();
}
#endif
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
return 0;
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
return 0;
}
if (is_exec && (error_code & DSISR_PROTFAULT))
printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
" page (%lx) - exploit attempt? (uid: %d)\n",
address, current_uid());
return SIGSEGV;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
return SIGKILL;
pagefault_out_of_memory();
return 0;
do_sigbus:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
return 0;
}
return SIGBUS;
}
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
unsigned long *stackend;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
regs->nip = entry->fixup;
return;
}
/* kernel has accessed a bad area */
/* regs->trap identifies which exception vector brought us here */
switch (regs->trap) {
case 0x300:
case 0x380:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"data at address 0x%08lx\n", regs->dar);
break;
case 0x400:
case 0x480:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"instruction fetch\n");
break;
default:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"unknown fault\n");
break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
/* Check the stack canary before dying, to aid overflow diagnosis. */
stackend = end_of_stack(current);
if (current != &init_task && *stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
die("Kernel access of bad area", regs, sig);
}
| gpl-2.0 |
MatiasBjorling/lightnvm-moved-to-OpenChannelSSD-Linux | drivers/staging/vt6656/michael.c | 2676 | 3958 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: michael.cpp
*
* Purpose: The implementation of LIST data structure.
*
* Author: Kyle Hsu
*
* Date: Sep 4, 2002
*
* Functions:
* s_dwGetUINT32 - Convert from u8[] to u32 in a portable way
* s_vPutUINT32 - Convert from u32 to u8[] in a portable way
* s_vClear - Reset the state to the empty message.
* s_vSetKey - Set the key.
* MIC_vInit - Set the key.
* s_vAppendByte - Append the byte to our word-sized buffer.
* MIC_vAppend - call s_vAppendByte.
* MIC_vGetMIC - Append the minimum padding and call s_vAppendByte.
*
* Revision History:
*
*/
#include "tmacro.h"
#include "michael.h"
/*
* static u32 s_dwGetUINT32(u8 * p); Get u32 from
* 4 bytes LSByte first
* static void s_vPutUINT32(u8* p, u32 val); Put u32 into
* 4 bytes LSByte first
*/
static void s_vClear(void); /* Clear the internal message,
* resets the object to the
* state just after construction. */
static void s_vSetKey(u32 dwK0, u32 dwK1);
static void s_vAppendByte(u8 b); /* Add a single byte to the internal
* message */
static u32 L, R; /* Current state */
static u32 K0, K1; /* Key */
static u32 M; /* Message accumulator (single word) */
static unsigned int nBytesInM; /* # bytes in M */
/*
static u32 s_dwGetUINT32 (u8 * p)
// Convert from u8[] to u32 in a portable way
{
u32 res = 0;
unsigned int i;
for (i = 0; i < 4; i++)
res |= (*p++) << (8*i);
return res;
}
static void s_vPutUINT32(u8 *p, u32 val)
// Convert from u32 to u8[] in a portable way
{
unsigned int i;
for (i = 0; i < 4; i++) {
*p++ = (u8) (val & 0xff);
val >>= 8;
}
}
*/
/* Reset the running MIC state to the empty-message state. */
static void s_vClear(void)
{
	/* (L, R) restarts from the key; the partial-word buffer empties. */
	L = K0;
	R = K1;
	M = 0;
	nBytesInM = 0;
}
/* Install a new key and restart the message state from it. */
static void s_vSetKey(u32 dwK0, u32 dwK1)
{
	K0 = dwK0;
	K1 = dwK1;
	s_vClear();	/* reset (L, R) and the byte buffer from the new key */
}
/*
 * Accumulate one byte into the 32-bit word buffer M (little-endian
 * order); once four bytes are buffered, mix the word into (L, R) with
 * the rotate/swap/add round structure and empty the buffer.
 */
static void s_vAppendByte(u8 b)
{
/* Append the byte to our word-sized buffer */
M |= b << (8*nBytesInM);
nBytesInM++;
/* Process the word if it is full. */
if (nBytesInM >= 4) {
L ^= M;
R ^= ROL32(L, 17);
L += R;
/* byte-swap within each halfword of L */
R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
L += R;
R ^= ROL32(L, 3);
L += R;
R ^= ROR32(L, 2);
L += R;
/* Clear the buffer */
M = 0;
nBytesInM = 0;
}
}
/* Public entry: install the MIC key (also resets the message state). */
void MIC_vInit(u32 dwK0, u32 dwK1)
{
	s_vSetKey(dwK0, dwK1);
}
/* Public entry: wipe the key material and reset the running state. */
void MIC_vUnInit(void)
{
	K0 = 0;
	K1 = 0;

	/* s_vClear() reloads (L, R) from the now-zeroed key, wiping them. */
	s_vClear();
}
/* Public entry: feed nBytes bytes from src into the running MIC. */
void MIC_vAppend(u8 * src, unsigned int nBytes)
{
	unsigned int i;

	for (i = 0; i < nBytes; i++)
		s_vAppendByte(src[i]);
}
/*
 * Finalize the MIC: append the 0x5a terminator plus zero padding to a
 * word boundary, return the resulting (L, R) pair through the output
 * pointers, then reset the state for the next message.
 */
void MIC_vGetMIC(u32 * pdwL, u32 * pdwR)
{
/* Append the minimum padding */
s_vAppendByte(0x5a);
s_vAppendByte(0);
s_vAppendByte(0);
s_vAppendByte(0);
s_vAppendByte(0);
/* and then zeroes until the length is a multiple of 4 */
while (nBytesInM != 0)
s_vAppendByte(0);
/* The s_vAppendByte function has already computed the result. */
*pdwL = L;
*pdwR = R;
/* Reset to the empty message. */
s_vClear();
}
| gpl-2.0 |
The-Sickness/S6-MM | drivers/staging/vt6656/michael.c | 2676 | 3958 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: michael.cpp
*
* Purpose: The implementation of LIST data structure.
*
* Author: Kyle Hsu
*
* Date: Sep 4, 2002
*
* Functions:
* s_dwGetUINT32 - Convert from u8[] to u32 in a portable way
* s_vPutUINT32 - Convert from u32 to u8[] in a portable way
* s_vClear - Reset the state to the empty message.
* s_vSetKey - Set the key.
* MIC_vInit - Set the key.
* s_vAppendByte - Append the byte to our word-sized buffer.
* MIC_vAppend - call s_vAppendByte.
* MIC_vGetMIC - Append the minimum padding and call s_vAppendByte.
*
* Revision History:
*
*/
#include "tmacro.h"
#include "michael.h"
/*
* static u32 s_dwGetUINT32(u8 * p); Get u32 from
* 4 bytes LSByte first
* static void s_vPutUINT32(u8* p, u32 val); Put u32 into
* 4 bytes LSByte first
*/
static void s_vClear(void); /* Clear the internal message,
* resets the object to the
* state just after construction. */
static void s_vSetKey(u32 dwK0, u32 dwK1);
static void s_vAppendByte(u8 b); /* Add a single byte to the internal
* message */
static u32 L, R; /* Current state */
static u32 K0, K1; /* Key */
static u32 M; /* Message accumulator (single word) */
static unsigned int nBytesInM; /* # bytes in M */
/*
static u32 s_dwGetUINT32 (u8 * p)
// Convert from u8[] to u32 in a portable way
{
u32 res = 0;
unsigned int i;
for (i = 0; i < 4; i++)
res |= (*p++) << (8*i);
return res;
}
static void s_vPutUINT32(u8 *p, u32 val)
// Convert from u32 to u8[] in a portable way
{
unsigned int i;
for (i = 0; i < 4; i++) {
*p++ = (u8) (val & 0xff);
val >>= 8;
}
}
*/
/*
 * s_vClear - return to the post-construction state: key loaded into
 * (L, R), word accumulator empty. The four assignments are independent.
 */
static void s_vClear(void)
{
	nBytesInM = 0;
	M = 0;
	L = K0;
	R = K1;
}
/*
 * s_vSetKey - latch the 64-bit Michael key (two 32-bit words) into the
 * static state, then reset so a fresh message can be accumulated.
 */
static void s_vSetKey(u32 dwK0, u32 dwK1)
{
	/* Set the key */
	K0 = dwK0;
	K1 = dwK1;
	/* and reset the message */
	s_vClear();
}
/*
 * s_vAppendByte - add one byte to the 32-bit accumulator M (LSByte
 * first). Once four bytes are collected, mix M into (L, R) with the
 * Michael block function and empty the accumulator.
 */
static void s_vAppendByte(u8 b)
{
	/* Append the byte to our word-sized buffer */
	M |= b << (8*nBytesInM);
	nBytesInM++;
	/* Process the word if it is full. */
	if (nBytesInM >= 4) {
		L ^= M;
		R ^= ROL32(L, 17);
		L += R;
		/* swap the bytes within each 16-bit half of L */
		R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
		L += R;
		R ^= ROL32(L, 3);
		L += R;
		R ^= ROR32(L, 2);
		L += R;
		/* Clear the buffer */
		M = 0;
		nBytesInM = 0;
	}
}
/*
 * MIC_vInit - public entry point: program the MIC key and reset the
 * running state (thin wrapper around s_vSetKey()).
 */
void MIC_vInit(u32 dwK0, u32 dwK1)
{
	/* Set the key */
	s_vSetKey(dwK0, dwK1);
}
/*
 * MIC_vUnInit - wipe the key material from the static state.  Note
 * that a plain store may not be a guaranteed secure erase; the state
 * is module-static, so this is best-effort hygiene.
 */
void MIC_vUnInit(void)
{
	/* Wipe the key material */
	K0 = 0;
	K1 = 0;
	/* And the other fields as well. */
	/* Note that this sets (L,R) to (K0,K1) which is just fine. */
	s_vClear();
}
/*
 * MIC_vAppend - feed nBytes of message data into the Michael MIC state.
 * Each byte is pushed through s_vAppendByte(), which mixes completed
 * 32-bit words into the running (L, R) state.
 */
void MIC_vAppend(u8 *src, unsigned int nBytes)
{
	unsigned int i;

	for (i = 0; i < nBytes; i++)
		s_vAppendByte(src[i]);
}
/*
 * MIC_vGetMIC - finalize the MIC computation and return it in two
 * 32-bit halves. Applies the Michael padding (one 0x5a byte, four
 * zero bytes, then zeroes until the word buffer flushes), reads out
 * (L, R), and resets the state for the next message.
 */
void MIC_vGetMIC(u32 *pdwL, u32 *pdwR)
{
	int i;

	/* Minimum Michael padding: 0x5a followed by four zero bytes. */
	s_vAppendByte(0x5a);
	for (i = 0; i < 4; i++)
		s_vAppendByte(0);
	/* Keep padding with zeroes until the accumulator is empty. */
	while (nBytesInM != 0)
		s_vAppendByte(0);
	/* s_vAppendByte() has already folded everything into (L, R). */
	*pdwL = L;
	*pdwR = R;
	/* Start over with an empty message. */
	s_vClear();
}
| gpl-2.0 |
miquelmartos/geeksphone-kernel-zero-3.0 | drivers/hwmon/tmp102.c | 2932 | 8695 | /* Texas Instruments TMP102 SMBus temperature sensor driver
*
* Copyright (C) 2010 Steven King <sfking@fdwdc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/device.h>
#define DRIVER_NAME "tmp102"
#define TMP102_TEMP_REG 0x00
#define TMP102_CONF_REG 0x01
/* note: these bit definitions are byte swapped */
#define TMP102_CONF_SD 0x0100
#define TMP102_CONF_TM 0x0200
#define TMP102_CONF_POL 0x0400
#define TMP102_CONF_F0 0x0800
#define TMP102_CONF_F1 0x1000
#define TMP102_CONF_R0 0x2000
#define TMP102_CONF_R1 0x4000
#define TMP102_CONF_OS 0x8000
#define TMP102_CONF_EM 0x0010
#define TMP102_CONF_AL 0x0020
#define TMP102_CONF_CR0 0x0040
#define TMP102_CONF_CR1 0x0080
#define TMP102_TLOW_REG 0x02
#define TMP102_THIGH_REG 0x03
struct tmp102 {
struct device *hwmon_dev;
struct mutex lock;
u16 config_orig;
unsigned long last_update;
int temp[3];
};
/* SMBus specifies low byte first, but the TMP102 returns high byte first,
* so we have to swab16 the values */
/*
 * tmp102_read_reg - read one 16-bit TMP102 register.
 * SMBus delivers the low byte first but the TMP102 sends the high
 * byte first, so a successful read is swab16()ed; negative errnos
 * from the SMBus layer are passed through untouched.
 */
static inline int tmp102_read_reg(struct i2c_client *client, u8 reg)
{
	int result = i2c_smbus_read_word_data(client, reg);
	return result < 0 ? result : swab16(result);
}
/*
 * tmp102_write_reg - write one 16-bit TMP102 register, byte-swapping
 * to match the chip's high-byte-first order (see tmp102_read_reg).
 */
static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val)
{
	return i2c_smbus_write_word_data(client, reg, swab16(val));
}
/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
/*
 * tmp102_reg_to_mC - convert a left-adjusted 13-bit TMP102 register
 * value to milliCelsius. Bit 0 is not part of the reading and is
 * masked off; each remaining LSB step corresponds to 1000/128 mC.
 * Sign-extension of negative readings is preserved by the s16 input.
 */
static inline int tmp102_reg_to_mC(s16 val)
{
	int raw = val & ~0x01;	/* strip the non-temperature LSB */

	return raw * 1000 / 128;
}
/* convert milliCelsius to left adjusted 13-bit TMP102 register value */
/*
 * tmp102_mC_to_reg - convert milliCelsius to the left-adjusted 13-bit
 * register representation used by the TMP102 limit registers
 * (inverse of tmp102_reg_to_mC, modulo integer truncation).
 */
static inline u16 tmp102_mC_to_reg(int val)
{
	int scaled = val * 128;

	return scaled / 1000;
}
static const u8 tmp102_reg[] = {
TMP102_TEMP_REG,
TMP102_TLOW_REG,
TMP102_THIGH_REG,
};
/*
 * tmp102_update_device - refresh the cached readings (temp, tlow,
 * thigh) under the driver lock, re-reading the chip at most every
 * HZ/3 jiffies. A failed register read leaves the previous cached
 * value for that entry in place.
 */
static struct tmp102 *tmp102_update_device(struct i2c_client *client)
{
	struct tmp102 *tmp102 = i2c_get_clientdata(client);

	mutex_lock(&tmp102->lock);
	if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
		int i;
		for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
			int status = tmp102_read_reg(client, tmp102_reg[i]);
			/* keep the old value on error */
			if (status > -1)
				tmp102->temp[i] = tmp102_reg_to_mC(status);
		}
		tmp102->last_update = jiffies;
	}
	mutex_unlock(&tmp102->lock);
	return tmp102;
}
/*
 * tmp102_show_temp - sysfs "show" callback: print the cached reading
 * (milliCelsius) selected by the sensor attribute index.
 */
static ssize_t tmp102_show_temp(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
	struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev));

	return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
}
/*
 * tmp102_set_temp - sysfs "store" callback: parse a milliCelsius
 * value, clamp it to the chip's extended range, update the cache and
 * write the matching limit register.
 */
static ssize_t tmp102_set_temp(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct tmp102 *tmp102 = i2c_get_clientdata(client);
	long val;
	int status;

	if (strict_strtol(buf, 10, &val) < 0)
		return -EINVAL;
	/* clamp to the representable extended-mode range */
	val = SENSORS_LIMIT(val, -256000, 255000);

	mutex_lock(&tmp102->lock);
	tmp102->temp[sda->index] = val;
	status = tmp102_write_reg(client, tmp102_reg[sda->index],
				  tmp102_mC_to_reg(val));
	mutex_unlock(&tmp102->lock);
	/* on success report the whole buffer consumed */
	return status ? : count;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
tmp102_set_temp, 1);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
tmp102_set_temp, 2);
static struct attribute *tmp102_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
NULL
};
static const struct attribute_group tmp102_attr_group = {
.attrs = tmp102_attributes,
};
#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
/*
 * tmp102_probe - bring the sensor up: verify SMBus word support,
 * save the original config register (restored on any later failure
 * via the goto-cleanup chain), program TMP102_CONFIG and verify it
 * stuck, then create the sysfs group and register with hwmon.
 */
static int __devinit tmp102_probe(struct i2c_client *client,
				  const struct i2c_device_id *id)
{
	struct tmp102 *tmp102;
	int status;

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_WORD_DATA)) {
		dev_err(&client->dev, "adapter doesn't support SMBus word "
			"transactions\n");
		return -ENODEV;
	}

	tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL);
	if (!tmp102) {
		dev_dbg(&client->dev, "kzalloc failed\n");
		return -ENOMEM;
	}
	i2c_set_clientdata(client, tmp102);

	/* remember the original configuration so we can restore it */
	status = tmp102_read_reg(client, TMP102_CONF_REG);
	if (status < 0) {
		dev_err(&client->dev, "error reading config register\n");
		goto fail_free;
	}
	tmp102->config_orig = status;
	status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG);
	if (status < 0) {
		dev_err(&client->dev, "error writing config register\n");
		goto fail_restore_config;
	}
	status = tmp102_read_reg(client, TMP102_CONF_REG);
	if (status < 0) {
		dev_err(&client->dev, "error reading config register\n");
		goto fail_restore_config;
	}
	/* ignore read-only bits when verifying the write took effect */
	status &= ~TMP102_CONFIG_RD_ONLY;
	if (status != TMP102_CONFIG) {
		dev_err(&client->dev, "config settings did not stick\n");
		status = -ENODEV;
		goto fail_restore_config;
	}
	/* force a refresh on the first sysfs read */
	tmp102->last_update = jiffies - HZ;
	mutex_init(&tmp102->lock);

	status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group);
	if (status) {
		dev_dbg(&client->dev, "could not create sysfs files\n");
		goto fail_restore_config;
	}
	tmp102->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(tmp102->hwmon_dev)) {
		dev_dbg(&client->dev, "unable to register hwmon device\n");
		status = PTR_ERR(tmp102->hwmon_dev);
		goto fail_remove_sysfs;
	}

	dev_info(&client->dev, "initialized\n");

	return 0;

fail_remove_sysfs:
	sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
fail_restore_config:
	tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig);
fail_free:
	kfree(tmp102);

	return status;
}
/*
 * tmp102_remove - unwind probe: unregister from hwmon, remove the
 * sysfs group, and put the chip back into shutdown mode if it was
 * shut down when we first probed it.
 */
static int __devexit tmp102_remove(struct i2c_client *client)
{
	struct tmp102 *tmp102 = i2c_get_clientdata(client);

	hwmon_device_unregister(tmp102->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);

	/* Stop monitoring if device was stopped originally */
	if (tmp102->config_orig & TMP102_CONF_SD) {
		int config;

		config = tmp102_read_reg(client, TMP102_CONF_REG);
		if (config >= 0)
			tmp102_write_reg(client, TMP102_CONF_REG,
					 config | TMP102_CONF_SD);
	}

	kfree(tmp102);

	return 0;
}
#ifdef CONFIG_PM
/*
 * tmp102_suspend - PM callback: set the shutdown (SD) bit so the
 * sensor stops converting while the system sleeps.
 */
static int tmp102_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	int config;

	config = tmp102_read_reg(client, TMP102_CONF_REG);
	if (config < 0)
		return config;

	config |= TMP102_CONF_SD;
	return tmp102_write_reg(client, TMP102_CONF_REG, config);
}
/*
 * tmp102_resume - PM callback: clear the shutdown (SD) bit so the
 * sensor resumes continuous conversion.
 */
static int tmp102_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	int config;

	config = tmp102_read_reg(client, TMP102_CONF_REG);
	if (config < 0)
		return config;

	config &= ~TMP102_CONF_SD;
	return tmp102_write_reg(client, TMP102_CONF_REG, config);
}
static const struct dev_pm_ops tmp102_dev_pm_ops = {
.suspend = tmp102_suspend,
.resume = tmp102_resume,
};
#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
#else
#define TMP102_DEV_PM_OPS NULL
#endif /* CONFIG_PM */
static const struct i2c_device_id tmp102_id[] = {
{ "tmp102", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp102_id);
static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.pm = TMP102_DEV_PM_OPS,
.probe = tmp102_probe,
.remove = __devexit_p(tmp102_remove),
.id_table = tmp102_id,
};
/* Module load: register the I2C driver with the core. */
static int __init tmp102_init(void)
{
	return i2c_add_driver(&tmp102_driver);
}
/* Module unload: deregister the I2C driver. */
static void __exit tmp102_exit(void)
{
	i2c_del_driver(&tmp102_driver);
}
MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
freexperia/android_kernel_sony_tegra | arch/mips/nxp/pnx8550/common/setup.c | 3444 | 4044 | /*
*
* 2.6 port, Embedded Alley Solutions, Inc
*
* Based on Per Hallsmark, per.hallsmark@mvista.com
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/serial_pnx8xxx.h>
#include <linux/pm.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/mipsregs.h>
#include <asm/reboot.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <glb.h>
#include <int.h>
#include <pci.h>
#include <uart.h>
#include <nand.h>
extern void __init board_setup(void);
extern void pnx8550_machine_restart(char *);
extern void pnx8550_machine_halt(void);
extern void pnx8550_machine_power_off(void);
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern char *prom_getcmdline(void);
struct resource standard_io_resources[] = {
{
.start = 0x00,
.end = 0x1f,
.name = "dma1",
.flags = IORESOURCE_BUSY
}, {
.start = 0x40,
.end = 0x5f,
.name = "timer",
.flags = IORESOURCE_BUSY
}, {
.start = 0x80,
.end = 0x8f,
.name = "dma page reg",
.flags = IORESOURCE_BUSY
}, {
.start = 0xc0,
.end = 0xdf,
.name = "dma2",
.flags = IORESOURCE_BUSY
},
};
#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
extern struct resource pci_io_resource;
extern struct resource pci_mem_resource;
/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
unsigned long get_system_mem_size(void)
{
	/* Read IP2031_RANK0_ADDR_LO */
	unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
	/* Read IP2031_RANK1_ADDR_HI */
	unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);

	/* size spans from the bottom of rank 0 to the top of rank 1 */
	return dram_r1_hi - dram_r0_lo + 1;
}
int pnx8550_console_port = -1;
/*
 * plat_mem_setup - platform initialisation for the PNX8550 board:
 * installs the restart/halt/power-off handlers, claims the legacy
 * I/O regions, routes the UART1 GPIO pins to their primary function
 * and, if "console=ttyS<n>" is on the command line, pre-programs that
 * UART so early printk can use it.
 */
void __init plat_mem_setup(void)
{
	int i;
	char* argptr;

	board_setup();  /* board specific setup */

	_machine_restart = pnx8550_machine_restart;
	_machine_halt = pnx8550_machine_halt;
	pm_power_off = pnx8550_machine_power_off;

	/* Clear the Global 2 Register, PCI Inta Output Enable Registers
	   Bit 1:Enable DAC Powerdown
	   -> 0:DACs are enabled and are working normally
	   1:DACs are powerdown
	   Bit 0:Enable of PCI inta output
	   -> 0 = Disable PCI inta output
	   1 = Enable PCI inta output
	*/
	PNX8550_GLB2_ENAB_INTA_O = 0;

	/* IO/MEM resources. */
	set_io_port_base(PNX8550_PORT_BASE);
	ioport_resource.start = 0;
	ioport_resource.end = ~0;
	iomem_resource.start = 0;
	iomem_resource.end = ~0;

	/* Request I/O space for devices on this board */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources + i);

	/* Place the Mode Control bit for GPIO pin 16 in primary function */
	/* Pin 16 is used by UART1, UA1_TX */
	outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
	     (PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
	     PNX8550_GPIO_MC1);
	argptr = prom_getcmdline();

	if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
		argptr += strlen("console=ttyS");
		pnx8550_console_port = *argptr == '0' ? 0 : 1;

		/* We must initialize the UART (console) before early printk */
		/* Set LCR to 8-bit and BAUD to 38400 (no 5) */
		ip3106_lcr(UART_BASE, pnx8550_console_port) =
			PNX8XXX_UART_LCR_8BIT;
		ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
	}

	return;
}
| gpl-2.0 |
davidmueller13/TW_Kernel_LP | drivers/mtd/devices/sst25l.c | 4980 | 10086 | /*
* sst25l.c
*
* Driver for SST25L SPI Flash chips
*
* Copyright © 2009 Bluewater Systems Ltd
* Author: Andre Renaud <andre@bluewatersys.com>
* Author: Ryan Mallon
*
* Based on m25p80.c
*
* This code is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
/* Erases can take up to 3 seconds! */
#define MAX_READY_WAIT_JIFFIES msecs_to_jiffies(3000)
#define SST25L_CMD_WRSR 0x01 /* Write status register */
#define SST25L_CMD_WRDI 0x04 /* Write disable */
#define SST25L_CMD_RDSR 0x05 /* Read status register */
#define SST25L_CMD_WREN 0x06 /* Write enable */
#define SST25L_CMD_READ 0x03 /* High speed read */
#define SST25L_CMD_EWSR 0x50 /* Enable write status register */
#define SST25L_CMD_SECTOR_ERASE 0x20 /* Erase sector */
#define SST25L_CMD_READ_ID 0x90 /* Read device ID */
#define SST25L_CMD_AAI_PROGRAM 0xaf /* Auto address increment */
#define SST25L_STATUS_BUSY (1 << 0) /* Chip is busy */
#define SST25L_STATUS_WREN (1 << 1) /* Write enabled */
#define SST25L_STATUS_BP0 (1 << 2) /* Block protection 0 */
#define SST25L_STATUS_BP1 (1 << 3) /* Block protection 1 */
struct sst25l_flash {
struct spi_device *spi;
struct mutex lock;
struct mtd_info mtd;
};
struct flash_info {
const char *name;
uint16_t device_id;
unsigned page_size;
unsigned nr_pages;
unsigned erase_size;
};
#define to_sst25l_flash(x) container_of(x, struct sst25l_flash, mtd)
static struct flash_info __devinitdata sst25l_flash_info[] = {
{"sst25lf020a", 0xbf43, 256, 1024, 4096},
{"sst25lf040a", 0xbf44, 256, 2048, 4096},
};
/*
 * sst25l_status - read the chip's status register with a single
 * full-duplex transfer: the RDSR command byte clocks out while the
 * status clocks back in as the second byte. Returns 0 and fills
 * *status on success, or a negative SPI error.
 */
static int sst25l_status(struct sst25l_flash *flash, int *status)
{
	struct spi_message m;
	struct spi_transfer t;
	unsigned char cmd_resp[2];
	int err;

	spi_message_init(&m);
	memset(&t, 0, sizeof(struct spi_transfer));

	cmd_resp[0] = SST25L_CMD_RDSR;
	cmd_resp[1] = 0xff;	/* dummy byte clocked out while reading */
	t.tx_buf = cmd_resp;
	t.rx_buf = cmd_resp;
	t.len = sizeof(cmd_resp);
	spi_message_add_tail(&t, &m);
	err = spi_sync(flash->spi, &m);
	if (err < 0)
		return err;

	*status = cmd_resp[1];
	return 0;
}
/*
 * sst25l_write_enable - set or clear the write-enable latch and the
 * block-protection bits: BP0/BP1 are cleared when enabling writes and
 * set again when disabling. When enabling, the status register is
 * read back and -EROFS returned if WREN did not take effect.
 */
static int sst25l_write_enable(struct sst25l_flash *flash, int enable)
{
	unsigned char command[2];
	int status, err;

	command[0] = enable ? SST25L_CMD_WREN : SST25L_CMD_WRDI;
	err = spi_write(flash->spi, command, 1);
	if (err)
		return err;

	/* unlock the status register before rewriting protection bits */
	command[0] = SST25L_CMD_EWSR;
	err = spi_write(flash->spi, command, 1);
	if (err)
		return err;

	command[0] = SST25L_CMD_WRSR;
	command[1] = enable ? 0 : SST25L_STATUS_BP0 | SST25L_STATUS_BP1;
	err = spi_write(flash->spi, command, 2);
	if (err)
		return err;

	if (enable) {
		err = sst25l_status(flash, &status);
		if (err)
			return err;
		if (!(status & SST25L_STATUS_WREN))
			return -EROFS;
	}

	return 0;
}
/*
 * sst25l_wait_till_ready - poll the BUSY status bit until the current
 * program/erase operation completes, yielding the CPU between polls.
 * Returns 0 when ready, -ETIMEDOUT after MAX_READY_WAIT_JIFFIES, or a
 * status-read error.
 */
static int sst25l_wait_till_ready(struct sst25l_flash *flash)
{
	unsigned long deadline;
	int status, err;

	deadline = jiffies + MAX_READY_WAIT_JIFFIES;
	do {
		err = sst25l_status(flash, &status);
		if (err)
			return err;
		if (!(status & SST25L_STATUS_BUSY))
			return 0;

		cond_resched();
	} while (!time_after_eq(jiffies, deadline));

	return -ETIMEDOUT;
}
/*
 * sst25l_erase_sector - erase the sector containing @offset:
 * write-enable, issue SECTOR_ERASE with a 24-bit big-endian address,
 * wait for completion, then write-protect the chip again.
 */
static int sst25l_erase_sector(struct sst25l_flash *flash, uint32_t offset)
{
	unsigned char command[4];
	int err;

	err = sst25l_write_enable(flash, 1);
	if (err)
		return err;

	command[0] = SST25L_CMD_SECTOR_ERASE;
	command[1] = offset >> 16;
	command[2] = offset >> 8;
	command[3] = offset;
	err = spi_write(flash->spi, command, 4);
	if (err)
		return err;

	err = sst25l_wait_till_ready(flash);
	if (err)
		return err;

	return sst25l_write_enable(flash, 0);
}
/*
 * sst25l_erase - MTD erase callback: erase [addr, addr+len) one
 * sector at a time under the driver lock. Both address and length
 * must be erase-size aligned. Updates instr->state and invokes the
 * MTD erase callback on success.
 */
static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct sst25l_flash *flash = to_sst25l_flash(mtd);
	uint32_t addr, end;
	int err;

	/* Sanity checks */
	if ((uint32_t)instr->len % mtd->erasesize)
		return -EINVAL;

	if ((uint32_t)instr->addr % mtd->erasesize)
		return -EINVAL;

	addr = instr->addr;
	end = addr + instr->len;

	mutex_lock(&flash->lock);

	err = sst25l_wait_till_ready(flash);
	if (err) {
		mutex_unlock(&flash->lock);
		return err;
	}

	while (addr < end) {
		err = sst25l_erase_sector(flash, addr);
		if (err) {
			mutex_unlock(&flash->lock);
			instr->state = MTD_ERASE_FAILED;
			dev_err(&flash->spi->dev, "Erase failed\n");
			return err;
		}

		addr += mtd->erasesize;
	}

	mutex_unlock(&flash->lock);

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
/*
 * sst25l_read - MTD read callback: issue a high-speed READ with a
 * 24-bit address and clock @len bytes into @buf in a single two-part
 * SPI message (command transfer followed by the data transfer).
 */
static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, unsigned char *buf)
{
	struct sst25l_flash *flash = to_sst25l_flash(mtd);
	struct spi_transfer transfer[2];
	struct spi_message message;
	unsigned char command[4];
	int ret;

	spi_message_init(&message);
	memset(&transfer, 0, sizeof(transfer));

	command[0] = SST25L_CMD_READ;
	command[1] = from >> 16;
	command[2] = from >> 8;
	command[3] = from;

	transfer[0].tx_buf = command;
	transfer[0].len = sizeof(command);
	spi_message_add_tail(&transfer[0], &message);

	transfer[1].rx_buf = buf;
	transfer[1].len = len;
	spi_message_add_tail(&transfer[1], &message);

	mutex_lock(&flash->lock);

	/* Wait for previous write/erase to complete */
	ret = sst25l_wait_till_ready(flash);
	if (ret) {
		mutex_unlock(&flash->lock);
		return ret;
	}

	spi_sync(flash->spi, &message);

	/* NOTE(review): the "+=" assumes the MTD core zeroes *retlen
	 * before calling us - confirm; a plain assignment would be the
	 * more defensive form. */
	if (retlen && message.actual_length > sizeof(command))
		*retlen += message.actual_length - sizeof(command);

	mutex_unlock(&flash->lock);
	return 0;
}
/*
 * sst25l_write - MTD write callback. @to must be page aligned. Each
 * page is started with a 5-byte AAI_PROGRAM command carrying the
 * first data byte; the remaining bytes are sent as 2-byte
 * auto-address-increment writes, waiting for the chip to go idle
 * between every program cycle. The chip is re-protected on exit and
 * the number of bytes actually programmed is reported via *retlen.
 */
static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const unsigned char *buf)
{
	struct sst25l_flash *flash = to_sst25l_flash(mtd);
	int i, j, ret, bytes, copied = 0;
	unsigned char command[5];

	if ((uint32_t)to % mtd->writesize)
		return -EINVAL;

	mutex_lock(&flash->lock);

	ret = sst25l_write_enable(flash, 1);
	if (ret)
		goto out;

	for (i = 0; i < len; i += mtd->writesize) {
		ret = sst25l_wait_till_ready(flash);
		if (ret)
			goto out;

		/* Write the first byte of the page */
		command[0] = SST25L_CMD_AAI_PROGRAM;
		command[1] = (to + i) >> 16;
		command[2] = (to + i) >> 8;
		command[3] = (to + i);
		command[4] = buf[i];
		ret = spi_write(flash->spi, command, 5);
		if (ret < 0)
			goto out;
		copied++;

		/*
		 * Write the remaining bytes using auto address
		 * increment mode
		 */
		bytes = min_t(uint32_t, mtd->writesize, len - i);
		for (j = 1; j < bytes; j++, copied++) {
			ret = sst25l_wait_till_ready(flash);
			if (ret)
				goto out;

			/* command[0] still holds AAI_PROGRAM; the chip
			 * supplies the auto-incremented address */
			command[1] = buf[i + j];
			ret = spi_write(flash->spi, command, 2);
			if (ret)
				goto out;
		}
	}

out:
	/* always re-protect the chip, even on error */
	ret = sst25l_write_enable(flash, 0);

	if (retlen)
		*retlen = copied;

	mutex_unlock(&flash->lock);
	return ret;
}
/*
 * sst25l_match_device - issue READ_ID and look up the returned 16-bit
 * device id in sst25l_flash_info[]. Returns the matching table entry,
 * or NULL (with an error logged) if the id is unknown or the SPI
 * transfer failed.
 */
static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
{
	struct flash_info *flash_info = NULL;
	struct spi_message m;
	struct spi_transfer t;
	unsigned char cmd_resp[6];
	int i, err;
	uint16_t id;

	spi_message_init(&m);
	memset(&t, 0, sizeof(struct spi_transfer));

	/* command + 3 address bytes out, 2 id bytes back in */
	cmd_resp[0] = SST25L_CMD_READ_ID;
	cmd_resp[1] = 0;
	cmd_resp[2] = 0;
	cmd_resp[3] = 0;
	cmd_resp[4] = 0xff;
	cmd_resp[5] = 0xff;
	t.tx_buf = cmd_resp;
	t.rx_buf = cmd_resp;
	t.len = sizeof(cmd_resp);
	spi_message_add_tail(&t, &m);
	err = spi_sync(spi, &m);
	if (err < 0) {
		dev_err(&spi->dev, "error reading device id\n");
		return NULL;
	}

	id = (cmd_resp[4] << 8) | cmd_resp[5];

	for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
		if (sst25l_flash_info[i].device_id == id)
			flash_info = &sst25l_flash_info[i];

	if (!flash_info)
		dev_err(&spi->dev, "unknown id %.4x\n", id);

	return flash_info;
}
/*
 * sst25l_probe - identify the chip, populate mtd_info (geometry from
 * the matched flash_info plus the erase/read/write hooks) and
 * register the MTD device, using partition data from platform data
 * when available.
 */
static int __devinit sst25l_probe(struct spi_device *spi)
{
	struct flash_info *flash_info;
	struct sst25l_flash *flash;
	struct flash_platform_data *data;
	int ret;

	flash_info = sst25l_match_device(spi);
	if (!flash_info)
		return -ENODEV;

	flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
	if (!flash)
		return -ENOMEM;

	flash->spi = spi;
	mutex_init(&flash->lock);
	dev_set_drvdata(&spi->dev, flash);

	data = spi->dev.platform_data;
	if (data && data->name)
		flash->mtd.name = data->name;
	else
		flash->mtd.name = dev_name(&spi->dev);

	flash->mtd.type = MTD_NORFLASH;
	flash->mtd.flags = MTD_CAP_NORFLASH;
	flash->mtd.erasesize = flash_info->erase_size;
	flash->mtd.writesize = flash_info->page_size;
	flash->mtd.writebufsize = flash_info->page_size;
	flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
	flash->mtd._erase = sst25l_erase;
	flash->mtd._read = sst25l_read;
	flash->mtd._write = sst25l_write;

	dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
		 (long long)flash->mtd.size >> 10);

	pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
		 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		 flash->mtd.name,
		 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
		 flash->mtd.erasesize, flash->mtd.erasesize / 1024,
		 flash->mtd.numeraseregions);

	ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
					data ? data->parts : NULL,
					data ? data->nr_parts : 0);
	if (ret) {
		kfree(flash);
		dev_set_drvdata(&spi->dev, NULL);
		return -ENODEV;
	}

	return 0;
}
/*
 * sst25l_remove - unregister the MTD device; the driver state is only
 * freed when deregistration succeeded (the device may still be busy).
 */
static int __devexit sst25l_remove(struct spi_device *spi)
{
	struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
	int ret;

	ret = mtd_device_unregister(&flash->mtd);
	if (ret == 0)
		kfree(flash);
	return ret;
}
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
.remove = __devexit_p(sst25l_remove),
};
module_spi_driver(sst25l_driver);
MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
"Ryan Mallon");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mr-tweaker/sabermod_kernel_armani | drivers/watchdog/nv_tco.c | 4980 | 12411 | /*
* nv_tco 0.01: TCO timer driver for NV chipsets
*
* (c) Copyright 2005 Google Inc., All Rights Reserved.
*
* Based off i8xx_tco.c:
* (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
* Reserved.
* http://www.kernelconcepts.de
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* TCO timer driver for NV chipsets
* based on softdog.c by Alan Cox <alan@redhat.com>
*/
/*
* Includes, defines, variables, module parameters, ...
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include "nv_tco.h"
/* Module and version information */
#define TCO_VERSION "0.01"
#define TCO_MODULE_NAME "NV_TCO"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
/* internal variables */
static unsigned int tcobase;
static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
static unsigned long timer_alive;
static char tco_expect_close;
static struct pci_dev *tco_pci;
/* the watchdog platform device */
static struct platform_device *nv_tco_platform_device;
/* module parameters */
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat (2<heartbeat<39) */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39, "
"default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Some TCO specific functions
*/
/*
 * seconds_to_ticks - convert a timeout in seconds to hardware timer
 * ticks. The TCO count decrements once every 0.6 s, so the tick count
 * is seconds / 0.6, computed in integer arithmetic as (s * 10) / 6.
 */
static inline unsigned char seconds_to_ticks(int seconds)
{
	int deciseconds = seconds * 10;

	return deciseconds / 6;
}
/*
 * tco_timer_start - let the TCO timer run by clearing the HALT bit in
 * the count register (read-modify-write of the I/O port under
 * tco_lock with interrupts disabled).
 */
static void tco_timer_start(void)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tco_lock, flags);
	val = inl(TCO_CNT(tcobase));
	val &= ~TCO_CNT_TCOHALT;
	outl(val, TCO_CNT(tcobase));
	spin_unlock_irqrestore(&tco_lock, flags);
}
/*
 * tco_timer_stop - freeze the TCO timer by setting the HALT bit in
 * the count register (read-modify-write under tco_lock).
 */
static void tco_timer_stop(void)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tco_lock, flags);
	val = inl(TCO_CNT(tcobase));
	val |= TCO_CNT_TCOHALT;
	outl(val, TCO_CNT(tcobase));
	spin_unlock_irqrestore(&tco_lock, flags);
}
/*
 * tco_timer_keepalive - "pet" the watchdog: any write to the reload
 * register restarts the countdown from the programmed heartbeat.
 */
static void tco_timer_keepalive(void)
{
	unsigned long flags;

	spin_lock_irqsave(&tco_lock, flags);
	outb(0x01, TCO_RLD(tcobase));
	spin_unlock_irqrestore(&tco_lock, flags);
}
/*
 * tco_timer_set_heartbeat - program a new timeout of @t seconds.
 * Converts to 0.6 s hardware ticks, writes them into the low six bits
 * of the timer register (preserving the top two bits) and reads back
 * to verify the value stuck. On success the module-level "heartbeat"
 * is updated; returns -EINVAL for out-of-range or rejected values.
 */
static int tco_timer_set_heartbeat(int t)
{
	int ret = 0;
	unsigned char tmrval;
	unsigned long flags;
	u8 val;

	/*
	 * note seconds_to_ticks(t) > t, so if t > 0x3f, so is
	 * tmrval=seconds_to_ticks(t). Check that the count in seconds isn't
	 * out of range on it's own (to avoid overflow in tmrval).
	 */
	if (t < 0 || t > 0x3f)
		return -EINVAL;

	tmrval = seconds_to_ticks(t);

	/* "Values of 0h-3h are ignored and should not be attempted" */
	if (tmrval > 0x3f || tmrval < 0x04)
		return -EINVAL;

	/* Write new heartbeat to watchdog */
	spin_lock_irqsave(&tco_lock, flags);
	val = inb(TCO_TMR(tcobase));
	val &= 0xc0;
	val |= tmrval;
	outb(val, TCO_TMR(tcobase));
	val = inb(TCO_TMR(tcobase));
	if ((val & 0x3f) != tmrval)
		ret = -EINVAL;
	spin_unlock_irqrestore(&tco_lock, flags);

	if (ret)
		return ret;

	heartbeat = t;
	return 0;
}
/*
* /dev/watchdog handling
*/
/*
 * nv_tco_open - /dev/watchdog open: enforce single-open through the
 * timer_alive bit, then reload and start the hardware timer.
 */
static int nv_tco_open(struct inode *inode, struct file *file)
{
	/* /dev/watchdog can only be opened once */
	if (test_and_set_bit(0, &timer_alive))
		return -EBUSY;

	/* Reload and activate timer */
	tco_timer_keepalive();
	tco_timer_start();
	return nonseekable_open(inode, file);
}
/*
 * nv_tco_release - /dev/watchdog close: stop the timer only when the
 * magic character 'V' was written beforehand ("magic close");
 * otherwise keep it armed so an unexpected close still triggers a
 * reset if userspace dies.
 */
static int nv_tco_release(struct inode *inode, struct file *file)
{
	/* Shut off the timer */
	if (tco_expect_close == 42) {
		tco_timer_stop();
	} else {
		pr_crit("Unexpected close, not stopping watchdog!\n");
		tco_timer_keepalive();
	}
	clear_bit(0, &timer_alive);
	tco_expect_close = 0;
	return 0;
}
/*
 * nv_tco_write - any non-empty write pets the watchdog; unless the
 * "nowayout" module parameter is set, a 'V' anywhere in the data arms
 * the magic-close sequence checked in nv_tco_release().
 */
static ssize_t nv_tco_write(struct file *file, const char __user *data,
			    size_t len, loff_t *ppos)
{
	/* See if we got the magic character 'V' and reload the timer */
	if (len) {
		if (!nowayout) {
			size_t i;

			/*
			 * note: just in case someone wrote the magic character
			 * five months ago...
			 */
			tco_expect_close = 0;

			/*
			 * scan to see whether or not we got the magic
			 * character
			 */
			for (i = 0; i != len; i++) {
				char c;
				if (get_user(c, data + i))
					return -EFAULT;
				if (c == 'V')
					tco_expect_close = 42;
			}
		}

		/* someone wrote to us, we should reload the timer */
		tco_timer_keepalive();
	}
	return len;
}
/*
 * nv_tco_ioctl - standard watchdog ioctl interface: capability query,
 * status reads, enable/disable, keepalive, and get/set timeout.
 */
static long nv_tco_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	int new_options, retval = -EINVAL;
	int new_heartbeat;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.options = WDIOF_SETTIMEOUT |
			   WDIOF_KEEPALIVEPING |
			   WDIOF_MAGICCLOSE,
		.firmware_version = 0,
		.identity = TCO_MODULE_NAME,
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_SETOPTIONS:
		if (get_user(new_options, p))
			return -EFAULT;
		if (new_options & WDIOS_DISABLECARD) {
			tco_timer_stop();
			retval = 0;
		}
		if (new_options & WDIOS_ENABLECARD) {
			tco_timer_keepalive();
			tco_timer_start();
			retval = 0;
		}
		return retval;
	case WDIOC_KEEPALIVE:
		tco_timer_keepalive();
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_heartbeat, p))
			return -EFAULT;
		if (tco_timer_set_heartbeat(new_heartbeat))
			return -EINVAL;
		tco_timer_keepalive();
		/* Fall through */
	case WDIOC_GETTIMEOUT:
		return put_user(heartbeat, p);
	default:
		return -ENOTTY;
	}
}
/*
* Kernel Interfaces
*/
static const struct file_operations nv_tco_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = nv_tco_write,
.unlocked_ioctl = nv_tco_ioctl,
.open = nv_tco_open,
.release = nv_tco_release,
};
static struct miscdevice nv_tco_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &nv_tco_fops,
};
/*
* Data for PCI driver interface
*
* This data only exists for exporting the supported
* PCI ids via MODULE_DEVICE_TABLE. We do not actually
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
/*
* Init & exit routines
*/
/*
 * nv_tco_getdevice - locate the supported NVIDIA SMBus function,
 * derive the TCO I/O base from PCI config offset 0x64, claim the
 * region, quiesce the timer, mask TCO-generated SMIs, and clear the
 * chipset's NO_REBOOT bit. Returns 1 on success, 0 on any failure.
 */
static unsigned char __devinit nv_tco_getdevice(void)
{
	struct pci_dev *dev = NULL;
	u32 val;

	/* Find the PCI device */
	for_each_pci_dev(dev) {
		if (pci_match_id(tco_pci_tbl, dev) != NULL) {
			tco_pci = dev;
			break;
		}
	}
	if (!tco_pci)
		return 0;

	/* Find the base io port */
	pci_read_config_dword(tco_pci, 0x64, &val);
	val &= 0xffff;
	if (val == 0x0001 || val == 0x0000) {
		/* Something is wrong here, bar isn't setup */
		pr_err("failed to get tcobase address\n");
		return 0;
	}
	val &= 0xff00;
	tcobase = val + 0x40;

	if (!request_region(tcobase, 0x10, "NV TCO")) {
		pr_err("I/O address 0x%04x already in use\n", tcobase);
		return 0;
	}

	/* Set a reasonable heartbeat before we stop the timer */
	tco_timer_set_heartbeat(30);

	/*
	 * Stop the TCO before we change anything so we don't race with
	 * a zeroed timer.
	 */
	tco_timer_keepalive();
	tco_timer_stop();

	/* Disable SMI caused by TCO */
	if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) {
		pr_err("I/O address 0x%04x already in use\n",
		       MCP51_SMI_EN(tcobase));
		goto out;
	}
	val = inl(MCP51_SMI_EN(tcobase));
	val &= ~MCP51_SMI_EN_TCO;
	outl(val, MCP51_SMI_EN(tcobase));
	/* read back to check the SMI mask actually cleared */
	val = inl(MCP51_SMI_EN(tcobase));
	release_region(MCP51_SMI_EN(tcobase), 4);
	if (val & MCP51_SMI_EN_TCO) {
		pr_err("Could not disable SMI caused by TCO\n");
		goto out;
	}

	/* Check chipset's NO_REBOOT bit */
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT;
	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) {
		pr_err("failed to reset NO_REBOOT flag, reboot disabled by hardware\n");
		goto out;
	}

	return 1;
out:
	release_region(tcobase, 0x10);
	return 0;
}
/*
 * Platform probe: locate the hardware, report a previous watchdog reset,
 * validate the heartbeat module parameter and register the misc device.
 * Returns 0 on success or a negative errno.
 */
static int __devinit nv_tco_init(struct platform_device *dev)
{
	int ret;

	/* Check whether or not the hardware watchdog is there */
	if (!nv_tco_getdevice())
		return -ENODEV;

	/* Check to see if last reboot was due to watchdog timeout */
	pr_info("Watchdog reboot %sdetected\n",
		inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not ");

	/* Clear out the old status */
	outl(TCO_STS_RESET, TCO_STS(tcobase));

	/*
	 * Check that the heartbeat value is within it's range.
	 * If not, reset to the default.
	 */
	if (tco_timer_set_heartbeat(heartbeat)) {
		heartbeat = WATCHDOG_HEARTBEAT;
		tco_timer_set_heartbeat(heartbeat);
		pr_info("heartbeat value must be 2<heartbeat<39, using %d\n",
			heartbeat);
	}

	ret = misc_register(&nv_tco_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto unreg_region;
	}

	/* start with the device closed and the timer stopped */
	clear_bit(0, &timer_alive);
	tco_timer_stop();

	pr_info("initialized (0x%04x). heartbeat=%d sec (nowayout=%d)\n",
		tcobase, heartbeat, nowayout);

	return 0;

unreg_region:
	release_region(tcobase, 0x10);
	return ret;
}
/*
 * Undo nv_tco_init(): stop the timer (unless nowayout), restore the
 * NO_REBOOT bit, deregister the misc device and release the I/O region.
 */
static void __devexit nv_tco_cleanup(void)
{
	u32 val;

	/* Stop the timer before we leave */
	if (!nowayout)
		tco_timer_stop();

	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
	/* read back to confirm the write took effect */
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	if (val & MCP51_SMBUS_SETUP_B_TCO_REBOOT) {
		pr_crit("Couldn't unset REBOOT bit. Machine may soon reset\n");
	}

	/* Deregister */
	misc_deregister(&nv_tco_miscdev);
	release_region(tcobase, 0x10);
}
/* Platform remove hook: tear down only if probe claimed the hardware. */
static int __devexit nv_tco_remove(struct platform_device *dev)
{
	if (!tcobase)
		return 0;

	nv_tco_cleanup();
	return 0;
}
/*
 * Platform shutdown hook: stop the timer and re-set NO_REBOOT.
 */
static void nv_tco_shutdown(struct platform_device *dev)
{
	u32 val;

	tco_timer_stop();

	/* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not
	 * unset during shutdown. */
	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
	val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
}
/* Platform driver glue; probed against the device created in module init. */
static struct platform_driver nv_tco_driver = {
	.probe = nv_tco_init,
	.remove = __devexit_p(nv_tco_remove),
	.shutdown = nv_tco_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = TCO_MODULE_NAME,
	},
};
/*
 * Module init: register the platform driver, then create the matching
 * platform device so the probe runs.  Unwind the driver registration
 * if device creation fails.
 */
static int __init nv_tco_init_module(void)
{
	int rc;

	pr_info("NV TCO WatchDog Timer Driver v%s\n", TCO_VERSION);

	rc = platform_driver_register(&nv_tco_driver);
	if (rc)
		return rc;

	nv_tco_platform_device = platform_device_register_simple(
				TCO_MODULE_NAME, -1, NULL, 0);
	if (IS_ERR(nv_tco_platform_device)) {
		platform_driver_unregister(&nv_tco_driver);
		return PTR_ERR(nv_tco_platform_device);
	}

	return 0;
}
/* Module exit: unregister device then driver (reverse of init order). */
static void __exit nv_tco_cleanup_module(void)
{
	platform_device_unregister(nv_tco_platform_device);
	platform_driver_unregister(&nv_tco_driver);
	pr_info("NV TCO Watchdog Module Unloaded\n");
}
/* Standard module entry/exit hookup and metadata. */
module_init(nv_tco_init_module);
module_exit(nv_tco_cleanup_module);
MODULE_AUTHOR("Mike Waychison");
MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
mathieudevos/linux_kernel_3.2.48 | arch/m32r/platforms/usrv/io.c | 13940 | 5665 | /*
* linux/arch/m32r/platforms/usrv/io.c
*
* Typical I/O routines for uServer board.
*
* Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata,
* Hitoshi Yamamoto, Takeo Takahashi
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
*/
#include <asm/m32r.h>
#include <asm/page.h>
#include <asm/io.h>
#include <linux/types.h>
#include "../../../../drivers/pcmcia/m32r_cfc.h"
extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
#define CFC_IOSTART CFC_IOPORT_BASE
#define CFC_IOEND (CFC_IOSTART + (M32R_PCC_MAPSIZE * M32R_MAX_PCC) - 1)
#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
#define UART0_REGSTART 0x04c20000
#define UART1_REGSTART 0x04c20100
#define UART_IOMAP_SIZE 8
#define UART0_IOSTART 0x3f8
#define UART0_IOEND (UART0_IOSTART + UART_IOMAP_SIZE - 1)
#define UART1_IOSTART 0x2f8
#define UART1_IOEND (UART1_IOSTART + UART_IOMAP_SIZE - 1)
#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
#define PORT2ADDR(port) _port2addr(port)
/*
 * Map an I/O port number to a CPU address in the non-cached window.
 * When the on-board 16550 UARTs are configured, the legacy COM1/COM2
 * port ranges are rewritten to the UART register blocks; registers are
 * spaced two bytes apart, hence the << 1.
 */
static inline void *_port2addr(unsigned long port)
{
#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
	if (port >= UART0_IOSTART && port <= UART0_IOEND)
		port = ((port - UART0_IOSTART) << 1) + UART0_REGSTART;
	else if (port >= UART1_IOSTART && port <= UART1_IOEND)
		port = ((port - UART1_IOSTART) << 1) + UART1_REGSTART;
#endif	/* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
	return (void *)(port | (NONCACHE_OFFSET));
}
/*
 * Small I/O settle delay: a push/pop pair burns a few cycles and the
 * volatile asm with a memory clobber keeps it from being optimised away.
 */
static inline void delay(void)
{
	__asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
}
/* Read one byte from @port; CFC (PCMCIA) ports go through the pcc layer. */
unsigned char _inb(unsigned long port)
{
	unsigned char b;

	if (port < CFC_IOSTART || port > CFC_IOEND)
		return *(volatile unsigned char *)PORT2ADDR(port);

	pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0);
	return b;
}
/* Read one 16-bit word from @port; CFC ports go through the pcc layer. */
unsigned short _inw(unsigned long port)
{
	unsigned short w;

	if (port < CFC_IOSTART || port > CFC_IOEND)
		return *(volatile unsigned short *)PORT2ADDR(port);

	pcc_ioread_word(0, port, &w, sizeof(w), 1, 0);
	return w;
}
/* Read one 32-bit long from @port; CFC ports go through the pcc layer. */
unsigned long _inl(unsigned long port)
{
	unsigned long l;

	if (port < CFC_IOSTART || port > CFC_IOEND)
		return *(volatile unsigned long *)PORT2ADDR(port);

	pcc_ioread_word(0, port, &l, sizeof(l), 1, 0);
	return l;
}
/* Byte read with a trailing settle delay ("pausing" variant). */
unsigned char _inb_p(unsigned long port)
{
	const unsigned char val = _inb(port);

	delay();
	return val;
}
/* Word read with a trailing settle delay ("pausing" variant). */
unsigned short _inw_p(unsigned long port)
{
	const unsigned short val = _inw(port);

	delay();
	return val;
}
/* Long read with a trailing settle delay ("pausing" variant). */
unsigned long _inl_p(unsigned long port)
{
	const unsigned long val = _inl(port);

	delay();
	return val;
}
/* Write one byte to @port; CFC ports go through the pcc layer. */
void _outb(unsigned char b, unsigned long port)
{
	if (port < CFC_IOSTART || port > CFC_IOEND) {
		*(volatile unsigned char *)PORT2ADDR(port) = b;
		return;
	}

	pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0);
}
/* Write one 16-bit word to @port; CFC ports go through the pcc layer. */
void _outw(unsigned short w, unsigned long port)
{
	if (port < CFC_IOSTART || port > CFC_IOEND) {
		*(volatile unsigned short *)PORT2ADDR(port) = w;
		return;
	}

	pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0);
}
/* Write one 32-bit long to @port; CFC ports go through the pcc layer. */
void _outl(unsigned long l, unsigned long port)
{
	if (port < CFC_IOSTART || port > CFC_IOEND) {
		*(volatile unsigned long *)PORT2ADDR(port) = l;
		return;
	}

	pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0);
}
void _outb_p(unsigned char b, unsigned long port)
{
_outb(b, port);
delay();
}
void _outw_p(unsigned short w, unsigned long port)
{
_outw(w, port);
delay();
}
void _outl_p(unsigned long l, unsigned long port)
{
_outl(l, port);
delay();
}
/* Read @count bytes from @port into @addr (string I/O). */
void _insb(unsigned int port, void *addr, unsigned long count)
{
	unsigned char *dst = addr;
	unsigned char *src;

	if (port >= CFC_IOSTART && port <= CFC_IOEND) {
		pcc_ioread_byte(0, port, addr, sizeof(unsigned char), count, 1);
		return;
	}

	src = PORT2ADDR(port);
	for (; count != 0; count--)
		*dst++ = *(volatile unsigned char *)src;
}
/* Read @count 16-bit words from @port into @addr (string I/O). */
void _insw(unsigned int port, void *addr, unsigned long count)
{
	unsigned short *dst = addr;
	unsigned short *src;

	if (port >= CFC_IOSTART && port <= CFC_IOEND) {
		pcc_ioread_word(0, port, addr, sizeof(unsigned short), count,
				1);
		return;
	}

	src = PORT2ADDR(port);
	for (; count != 0; count--)
		*dst++ = *(volatile unsigned short *)src;
}
/*
 * Read @count 32-bit longs from @port into @addr (string I/O).
 * NOTE(review): unlike _insb/_insw there is no CFC (PCMCIA) redirection
 * here -- presumably long string reads are never issued to the CFC
 * range on this board; confirm against callers.
 */
void _insl(unsigned int port, void * addr, unsigned long count)
{
	unsigned long *buf = addr;
	unsigned long *portp;

	portp = PORT2ADDR(port);
	while (count--)
		*buf++ = *(volatile unsigned long *)portp;
}
/* Write @count bytes from @addr to @port (string I/O). */
void _outsb(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned char *src = addr;
	unsigned char *dst;

	if (port >= CFC_IOSTART && port <= CFC_IOEND) {
		pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char),
				 count, 1);
		return;
	}

	dst = PORT2ADDR(port);
	for (; count != 0; count--)
		*(volatile unsigned char *)dst = *src++;
}
/* Write @count 16-bit words from @addr to @port (string I/O). */
void _outsw(unsigned int port, const void *addr, unsigned long count)
{
	const unsigned short *src = addr;
	unsigned short *dst;

	if (port >= CFC_IOSTART && port <= CFC_IOEND) {
		pcc_iowrite_word(0, port, (void *)addr, sizeof(unsigned short),
				 count, 1);
		return;
	}

	dst = PORT2ADDR(port);
	for (; count != 0; count--)
		*(volatile unsigned short *)dst = *src++;
}
/*
 * Write @count 32-bit longs from @addr to @port (string I/O).
 * NOTE(review): like _insl there is no CFC redirection here --
 * presumably long string writes never target the CFC range; confirm
 * against callers.
 */
void _outsl(unsigned int port, const void * addr, unsigned long count)
{
	const unsigned long *buf = addr;
	/* was "unsigned char *": declare at the width actually accessed,
	 * matching _insl; the generated stores are unchanged. */
	unsigned long *portp;

	portp = PORT2ADDR(port);
	while (count--)
		*(volatile unsigned long *)portp = *buf++;
}
| gpl-2.0 |
gamerlulea/linux.2.6-custom | drivers/infiniband/hw/qib/qib_user_sdma.c | 117 | 21607 | /*
* Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include "qib.h"
#include "qib_user_sdma.h"
/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
/* One user SDMA packet: a header fragment plus payload fragments. */
struct qib_user_sdma_pkt {
	u8 naddr; /* dimension of addr (1..3) ... */
	u32 counter; /* sdma pkts queued counter for this entry */
	u64 added; /* global descq number of entries */

	struct {
		u32 offset; /* offset for kvaddr, addr */
		u32 length; /* length in page */
		u8 put_page; /* should we put_page? */
		u8 dma_mapped; /* is page dma_mapped? */
		struct page *page; /* may be NULL (coherent mem) */
		void *kvaddr; /* FIXME: only for pio hack */
		dma_addr_t addr; /* bus address handed to the DMA engine */
	} addr[4]; /* max pages, any more and we coalesce */

	struct list_head list; /* list element */
};
/* Per-context user SDMA queue state. */
struct qib_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct qib_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;		/* total packets queued so far */
	u32 sent_counter;	/* counter of last completed packet */

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};
/*
 * Allocate and initialise a per-context user SDMA queue: a slab cache
 * for packet structs and a dma_pool for fixed-size (64 byte) headers.
 * Returns NULL on any allocation failure.
 */
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	/* cache names embed unit/context so each queue is identifiable */
	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct qib_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;
done:
	return pq;
}
/*
 * Fill in fragment slot @i of @pkt; a straight 1:1 store of the caller's
 * values into the per-fragment bookkeeping fields.
 */
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, size_t offset, size_t len,
				    int put_page, int dma_mapped,
				    struct page *page,
				    void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}
/*
 * Initialise @pkt with its header as fragment 0 (never a user page,
 * hence put_page == 0) and record its queue counter value.
 */
static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
				      u32 counter, size_t offset,
				      size_t len, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				kvaddr, dma_addr);
}
/* we've too many pages in the iovec, coalesce to a single page */
/*
 * Copy all payload iovecs into one freshly allocated page, map it for
 * DMA and attach it as fragment 1 of @pkt.  On success the page stays
 * kmapped (kvaddr kept for the pio fallback); it is unmapped and freed
 * later by qib_user_sdma_free_pkt_frag().  Returns 0 or -errno.
 */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}
/*
 * How many distinct pages does this iovec element touch?
 * One page, plus one for every page boundary between the first and
 * last byte of the range.
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long start = (unsigned long) iov->iov_base;
	const unsigned long end = start + iov->iov_len - 1;

	return 1 + (int)(((end & PAGE_MASK) - (start & PAGE_MASK))
			 >> PAGE_SHIFT);
}
/*
 * Clip @len so that [addr, addr+len) does not cross a page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	if (offset + len > PAGE_SIZE)
		return PAGE_SIZE - offset;
	return len;
}
/*
 * Release everything a single fragment holds: DMA mapping, kmap,
 * page reference (or page itself), or the dma_pool header buffer.
 */
static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		/* pinned user page vs. page we allocated ourselves */
		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}
/*
 * Pin the user pages backing [addr, addr+tlen) and add one dma-mapped,
 * kmapped fragment per page to @pkt.
 *
 * Returns the number of pages pinned on success or a negative errno.
 * On failure, pages that were pinned but not yet recorded in @pkt are
 * released here; fragments already added are cleaned up by the caller
 * (qib_user_sdma_pin_pkt()'s free_pkt path).
 */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen = qib_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			/*
			 * Fix: drop the references on pages that were
			 * pinned but never attached to the packet, so
			 * they are not leaked on this error path (the
			 * caller only unwinds fragments in pkt->addr[]).
			 */
			while (j < npages)
				put_page(pages[j++]);
			ret = -ENOMEM;
			goto done;
		}

		qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					pages[j], kmap(pages[j]), dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}
/*
 * Pin and map the payload for every iovec of one packet.  On failure,
 * unwind every fragment already attached to @pkt (including the header
 * at index 0).  Returns 0 or a negative errno.
 */
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
				 struct qib_user_sdma_queue *pq,
				 struct qib_user_sdma_pkt *pkt,
				 const struct iovec *iov,
				 unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pkt, addr,
					      iov[idx].iov_len, npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}
/*
 * Attach the payload iovecs to @pkt: pin the user pages directly when
 * they fit in the fragment array, otherwise copy everything into a
 * single coalesced page.
 */
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	if (npages >= ARRAY_SIZE(pkt->addr))
		return qib_user_sdma_coalesce(dd, pkt, iov, niov);

	return qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
}
/* free a packet list -- return counter value of last packet */
/*
 * Free every fragment of every packet on @list, return the packet
 * structs to the slab, and reinitialise @list to empty.
 */
static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
	INIT_LIST_HEAD(list);
}
/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the
 * number of bytes total. list must be empty initially,
 * as, if there is an error we clean it...
 */
/*
 * Parse up to @maxpkts packets out of the user iovec array: the first
 * iovec of each packet is the PBC+header, subsequent iovecs are payload
 * until the PBC's word count is satisfied.  On success returns the
 * number of iovec elements consumed; on error returns -errno and frees
 * everything already queued on @list.
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
				    struct qib_user_sdma_queue *pq,
				    struct list_head *list,
				    const struct iovec *iov,
				    unsigned long niov,
				    int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct qib_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		/* header iovec must be 4-byte aligned and of sane size */
		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		/* common-size headers come from the dma_pool... */
		if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		/* ...odd-size headers fall back to a kmapped page */
		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * This assignment is a bit strange. it's because the
		 * the pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC. note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted. Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		/* consume payload iovecs until the PBC word count is met */
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			/* a fragment crossing a page boundary needs 2 pages */
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					  page, pbc, dma_addr);

		if (nfrags) {
			ret = qib_user_sdma_init_payload(dd, pq, pkt,
							 iov + idx_save + 1,
							 nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	/* success: report how many iovec elements were consumed */
	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}
/* Record the counter of the most recently completed (freed) packet. */
static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
					       u32 c)
{
	pq->sent_counter = c;
}
/* try to clean out queue -- needs pq->lock */
/*
 * Move packets whose descriptors the hardware has consumed off the
 * sent list, free them, and advance the completion counter.  Returns
 * the number of packets cleaned.
 */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct list_head free_list;
	struct qib_user_sdma_pkt *pkt;
	struct qib_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		/* sent list is in submit order; stop at first unfinished */
		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct qib_user_sdma_pkt, list);
		counter = pkt->counter;

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}
/* Tear down a queue created by qib_user_sdma_queue_create(); NULL ok. */
void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (pq) {
		kmem_cache_destroy(pq->pkt_slab);
		dma_pool_destroy(pq->header_cache);
		kfree(pq);
	}
}
/* clean descriptor queue, returns > 0 if some elements cleaned */
/* Kick the engine under sdma_lock so completed descriptors retire. */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
/* we're in close, drain packets so that we can cleanup successfully... */
/*
 * Poll (up to QIB_USER_SDMA_DRAIN_TIMEOUT iterations, 10ms apart) for
 * the sent list to empty; if it never does, forcibly free whatever is
 * left so the context can be torn down.
 */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
			       struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	int i;

	if (!pq)
		return;

	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}
/*
 * Pack the first descriptor qword for the send DMA engine:
 *   [63:32] SDmaPhyAddr[31:0]   [31:30] SDmaGeneration[1:0]
 *   [26:16] SDmaDwordCount[10:0]  [10:0] SDmaBufOffset[12:2]
 */
static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
					 u64 addr, u64 dwlen, u64 dwoffset)
{
	const u8 gen = ppd->sdma_generation;

	return cpu_to_le64(((addr & 0xfffffffcULL) << 32) |
			   ((gen & 3ULL) << 30) |
			   ((dwlen & 0x7ffULL) << 16) |
			   (dwoffset & 0x7ffULL));
}
/* Mark a descriptor as the first of a packet (SDmaFirstDesc, bit 12). */
static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
	const __le64 first_bit = cpu_to_le64(1ULL << 12);

	return descq | first_bit;
}
/* Mark a descriptor as last of packet (bit 11) + dma head (bit 13). */
static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
	const __le64 last_bits = cpu_to_le64(1ULL << 11 | 1ULL << 13);

	return descq | last_bits;
}
/* Second descriptor qword carries SDmaPhyAddr[47:32]. */
static inline __le64 qib_sdma_make_desc1(u64 addr)
{
	const u64 hi = addr >> 32;

	return cpu_to_le64(hi);
}
/*
 * Write the two descriptor qwords for fragment @idx of @pkt into the
 * ring slot at @tail, marking first/last descriptors of the packet.
 */
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
				    struct qib_user_sdma_pkt *pkt, int idx,
				    unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &ppd->sdma_descq[tail].qw[0];

	descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = qib_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = qib_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = qib_sdma_make_desc1(addr);
}
/* pq->lock must be held, get packets on the wire... */
/*
 * Write the descriptors for every packet on @pktlist into the SDMA
 * ring (under sdma_lock), move submitted packets to pq->sent and bump
 * the hardware tail.  On error, generation and descq_added are rolled
 * back to their saved values.  Returns number of packets pushed or
 * -errno.
 */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
				   struct qib_user_sdma_queue *pq,
				   struct list_head *pktlist)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = 0;
	unsigned long flags;
	u16 tail;
	u8 generation;
	u64 descq_added;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/* keep a copy for restoring purposes in case of problems */
	generation = ppd->sdma_generation;
	descq_added = ppd->sdma_descq_added;

	if (unlikely(!__qib_sdma_running(ppd))) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = ppd->sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct qib_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct qib_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		/* ring full: submit what we have so far */
		if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			/* wrap flips the generation counter */
			if (++tail == ppd->sdma_descq_cnt) {
				tail = 0;
				++ppd->sdma_generation;
			}
		}

		if ((ofs << 2) > ppd->ibmaxlen) {
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * If the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs > dd->piosize2kmax_dwords) {
			for (i = 0; i < pkt->naddr; i++) {
				ppd->sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == ppd->sdma_descq_cnt)
					dtail = 0;
			}
		}

		ppd->sdma_descq_added += pkt->naddr;
		pkt->added = ppd->sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (ppd->sdma_descq_tail != tail)
		dd->f_sdma_update_tail(ppd, tail);

unlock:
	if (unlikely(ret < 0)) {
		ppd->sdma_generation = generation;
		ppd->sdma_descq_added = descq_added;
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
/*
 * writev() entry point for user SDMA: parse up to 8 packets at a time
 * out of the user iovecs and push them onto the hardware ring, lazily
 * reclaiming completed descriptors.  Returns the number of packets
 * queued, or -errno.
 */
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
			 struct qib_user_sdma_queue *pq,
			 const struct iovec *iov,
			 unsigned long dim)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
	if (!qib_sdma_running(ppd))
		goto done_unlock;

	/* reclaim finished work before queuing more */
	if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
	}

	while (dim) {
		const int mxp = 8;

		/* mmap_sem held across the page pinning in queue_pkts */
		down_write(&current->mm->mmap_sem);
		ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		up_write(&current->mm->mmap_sem);

		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * Lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
				qib_user_sdma_hwqueue_clean(ppd);
				qib_user_sdma_queue_clean(ppd, pq);
			}

			ret = qib_user_sdma_push_pkts(ppd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				/* ring still full: stop parsing more */
				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}
/* Reclaim completed descriptors and packets; returns packets cleaned. */
int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq)
{
	int cleaned;

	mutex_lock(&pq->lock);
	qib_user_sdma_hwqueue_clean(ppd);
	cleaned = qib_user_sdma_queue_clean(ppd, pq);
	mutex_unlock(&pq->lock);

	return cleaned;
}
/* Counter of the last completed packet; 0 for a NULL queue. */
u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return 0;
	return pq->sent_counter;
}
/* Counter of the last queued packet; 0 for a NULL queue. */
u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return 0;
	return pq->counter;
}
| gpl-2.0 |
v1ron/linux-mainline | drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c | 1141 | 1388 | /*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "priv.h"
/*
 * Common constructor for MSVLD engines: all variants are falcons at
 * unit base 0x084000 with interrupts enabled.
 */
int
nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
		int index, struct nvkm_engine **pengine)
{
	return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
}
| gpl-2.0 |
halaszk/Perseus-halaszk-universal5433 | drivers/staging/iio/accel/adis16203_core.c | 2165 | 5911 | /*
* ADIS16203 Programmable Digital Vibration Sensor driver
*
 * Copyright 2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/imu/adis.h>
#include "adis16203.h"
#define DRIVER_NAME "adis16203"
/* Map scan index -> register holding the writable calibration offset.
 * Only the X-axis inclinometer null offset is adjustable on this part. */
static const u8 adis16203_addresses[] = {
	[ADIS16203_SCAN_INCLI_X] = ADIS16203_INCL_NULL,
};
static int adis16203_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int val,
			       int val2,
			       long mask)
{
	/* Only the inclinometer null offset is writable, so there is no
	 * need to dispatch on 'mask': look up the backing register for
	 * this channel and write the 14-bit value straight through. */
	struct adis *st = iio_priv(indio_dev);

	return adis_write_reg_16(st, adis16203_addresses[chan->scan_index],
				 val & 0x3FFF);
}
/*
 * Fetch a sample or a channel attribute.
 * RAW: one-shot conversion through the common adis helper.
 * SCALE/OFFSET: data-sheet constants selected by channel type.
 * CALIBBIAS: read back the 14-bit signed null-offset register.
 */
static int adis16203_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val, int *val2,
			      long mask)
{
	struct adis *st = iio_priv(indio_dev);
	int ret;
	int bits;
	u8 addr;
	s16 val16;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		return adis_single_conversion(indio_dev, chan,
				ADIS16203_ERROR_ACTIVE, val);
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			/* channel 0 is the supply monitor (coarser LSB),
			 * the rest use the aux ADC scaling */
			if (chan->channel == 0) {
				*val = 1;
				*val2 = 220000; /* 1.22 mV */
			} else {
				*val = 0;
				*val2 = 610000; /* 0.61 mV */
			}
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_TEMP:
			*val = -470; /* -0.47 C */
			*val2 = 0;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_INCLI:
			*val = 0;
			*val2 = 25000; /* 0.025 degree */
			return IIO_VAL_INT_PLUS_MICRO;
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		*val = 25000 / -470 - 1278; /* 25 C = 1278 */
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_CALIBBIAS:
		bits = 14;
		/* serialize against other register traffic on this device */
		mutex_lock(&indio_dev->mlock);
		addr = adis16203_addresses[chan->scan_index];
		ret = adis_read_reg_16(st, addr, &val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}
		/* mask to 14 bits, then sign-extend the two's-complement
		 * value via a shift pair */
		val16 &= (1 << bits) - 1;
		val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
		*val = val16;
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}
/* Channel map: supply + aux ADC (12 bit), X/Y inclination (14 bit,
 * only X exposes a CALIBBIAS control), temperature, soft timestamp. */
static const struct iio_chan_spec adis16203_channels[] = {
	ADIS_SUPPLY_CHAN(ADIS16203_SUPPLY_OUT, ADIS16203_SCAN_SUPPLY, 12),
	ADIS_AUX_ADC_CHAN(ADIS16203_AUX_ADC, ADIS16203_SCAN_AUX_ADC, 12),
	ADIS_INCLI_CHAN(X, ADIS16203_XINCL_OUT, ADIS16203_SCAN_INCLI_X,
			BIT(IIO_CHAN_INFO_CALIBBIAS), 14),
	/* Fixme: Not what it appears to be - see data sheet */
	ADIS_INCLI_CHAN(Y, ADIS16203_YINCL_OUT, ADIS16203_SCAN_INCLI_Y, 0, 14),
	ADIS_TEMP_CHAN(ADIS16203_TEMP_OUT, ADIS16203_SCAN_TEMP, 12),
	IIO_CHAN_SOFT_TIMESTAMP(5),
};
/* IIO callbacks; buffered-capture scan-mode changes are delegated to
 * the common adis helper. */
static const struct iio_info adis16203_info = {
	.read_raw = &adis16203_read_raw,
	.write_raw = &adis16203_write_raw,
	.update_scan_mode = adis_update_scan_mode,
	.driver_module = THIS_MODULE,
};
/* Human-readable decode of DIAG_STAT error bits, indexed by bit number. */
static const char * const adis16203_status_error_msgs[] = {
	[ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
	[ADIS16203_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
	[ADIS16203_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
	[ADIS16203_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
	[ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
};
/* Parameters for the common adis core: register locations, startup
 * timing, and which DIAG_STAT bits count as errors. */
static const struct adis_data adis16203_data = {
	.read_delay = 20,
	.msc_ctrl_reg = ADIS16203_MSC_CTRL,
	.glob_cmd_reg = ADIS16203_GLOB_CMD,
	.diag_stat_reg = ADIS16203_DIAG_STAT,

	.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
	.startup_delay = ADIS16203_STARTUP_DELAY,

	.status_error_msgs = adis16203_status_error_msgs,
	.status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
		BIT(ADIS16203_DIAG_STAT_SPI_FAIL_BIT) |
		BIT(ADIS16203_DIAG_STAT_FLASH_UPT_BIT) |
		BIT(ADIS16203_DIAG_STAT_POWER_HIGH_BIT) |
		BIT(ADIS16203_DIAG_STAT_POWER_LOW_BIT),
};
/*
 * SPI probe: allocate the IIO device, wire up channels and callbacks,
 * bring up the adis core (buffer + trigger), run the initial startup
 * sequence and register with IIO.  Failures unwind in reverse order.
 */
static int adis16203_probe(struct spi_device *spi)
{
	int ret;
	struct iio_dev *indio_dev;
	struct adis *st;

	/* setup the industrialio driver allocated elements */
	indio_dev = iio_device_alloc(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	st = iio_priv(indio_dev);
	/* this is only used for removal purposes */
	spi_set_drvdata(spi, indio_dev);

	indio_dev->name = spi->dev.driver->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->channels = adis16203_channels;
	indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
	indio_dev->info = &adis16203_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = adis_init(st, indio_dev, spi, &adis16203_data);
	if (ret)
		goto error_free_dev;

	ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
	if (ret)
		goto error_free_dev;

	/* Get the device into a sane initial state */
	ret = adis_initial_startup(st);
	if (ret)
		goto error_cleanup_buffer_trigger;
	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_cleanup_buffer_trigger;
	return 0;

error_cleanup_buffer_trigger:
	adis_cleanup_buffer_and_trigger(st, indio_dev);
error_free_dev:
	iio_device_free(indio_dev);
error_ret:
	return ret;
}
static int adis16203_remove(struct spi_device *spi)
{
	/* Tear down in reverse order of probe: unregister from IIO,
	 * release buffer/trigger machinery, then free the device. */
	struct iio_dev *dev = spi_get_drvdata(spi);

	iio_device_unregister(dev);
	adis_cleanup_buffer_and_trigger(iio_priv(dev), dev);
	iio_device_free(dev);

	return 0;
}
/* SPI driver glue; the name matches the MODULE_ALIAS for autoloading. */
static struct spi_driver adis16203_driver = {
	.driver = {
		.name = "adis16203",
		.owner = THIS_MODULE,
	},
	.probe = adis16203_probe,
	.remove = adis16203_remove,
};
module_spi_driver(adis16203_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:adis16203");
/*
* twl4030-irq.c - TWL4030/TPS659x0 irq support
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* Modifications to defer interrupt handling to a kernel thread:
* Copyright (C) 2006 MontaVista Software, Inc.
*
* Based on tlv320aic23.c:
* Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
*
* Code cleanup and modifications to IRQ handler.
* by syed khasim <x0khasim@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/i2c/twl.h>
#include "twl-core.h"
/*
* TWL4030 IRQ handling has two stages in hardware, and thus in software.
* The Primary Interrupt Handler (PIH) stage exposes status bits saying
* which Secondary Interrupt Handler (SIH) stage is raising an interrupt.
* SIH modules are more traditional IRQ components, which support per-IRQ
* enable/disable and trigger controls; they do most of the work.
*
* These chips are designed to support IRQ handling from two different
* I2C masters. Each has a dedicated IRQ line, and dedicated IRQ status
* and mask registers in the PIH and SIH modules.
*
* We set up IRQs starting at a platform-specified base, always starting
* with PIH and the SIH for PWR_INT and then usually adding GPIO:
* base + 0 .. base + 7 PIH
* base + 8 .. base + 15 SIH for PWR_INT
* base + 16 .. base + 33 SIH for GPIO
*/
/* PIH register offsets */
#define REG_PIH_ISR_P1 0x01
#define REG_PIH_ISR_P2 0x02
#define REG_PIH_SIR 0x03 /* for testing */
/* Linux could (eventually) use either IRQ line */
static int irq_line;
/*
 * Register-layout description of one Secondary Interrupt Handler bank,
 * including per-I2C-interrupt-line ISR/IMR register offsets.
 */
struct sih {
	char	name[8];
	u8	module;			/* module id */
	u8	control_offset;		/* for SIH_CTRL */
	bool	set_cor;		/* program Clear-On-Read at init */

	u8	bits;			/* valid in isr/imr */
	u8	bytes_ixr;		/* bytelen of ISR/IMR/SIR */

	u8	edr_offset;
	u8	bytes_edr;		/* bytelen of EDR */

	u8	irq_lines;		/* number of supported irq lines */

	/* SIR ignored -- set interrupt, for testing only */
	struct sih_irq_data {
		u8	isr_offset;
		u8	imr_offset;
	} mask[2];
	/* + 2 bytes padding */
};
static const struct sih *sih_modules;
static int nr_sih_modules;
/* Fill in the common layout for SIH modules that follow the standard
 * TWL4030_<mod>_{SIH_CTRL,EDR,ISR1,IMR1,ISR2,IMR2} register naming. */
#define SIH_INITIALIZER(modname, nbits) \
	.module		= TWL4030_MODULE_ ## modname, \
	.control_offset	= TWL4030_ ## modname ## _SIH_CTRL, \
	.bits		= nbits, \
	.bytes_ixr	= DIV_ROUND_UP(nbits, 8), \
	.edr_offset	= TWL4030_ ## modname ## _EDR, \
	.bytes_edr	= DIV_ROUND_UP((2*(nbits)), 8), \
	.irq_lines	= 2, \
	.mask = { { \
		.isr_offset	= TWL4030_ ## modname ## _ISR1, \
		.imr_offset	= TWL4030_ ## modname ## _IMR1, \
	}, \
	{ \
		.isr_offset	= TWL4030_ ## modname ## _ISR2, \
		.imr_offset	= TWL4030_ ## modname ## _IMR2, \
	}, },
/* register naming policies are inconsistent ... */
#define TWL4030_INT_PWR_EDR TWL4030_INT_PWR_EDR1
#define TWL4030_MODULE_KEYPAD_KEYP TWL4030_MODULE_KEYPAD
#define TWL4030_MODULE_INT_PWR TWL4030_MODULE_INT
/* Order in this table matches order in PIH_ISR. That is,
* BIT(n) in PIH_ISR is sih_modules[n].
*/
/* sih_modules_twl4030 is used both in twl4030 and twl5030 */
/* SIH bank descriptions for twl4030/twl5030, indexed by PIH_ISR bit. */
static const struct sih sih_modules_twl4030[6] = {
	[0] = {
		.name		= "gpio",
		.module		= TWL4030_MODULE_GPIO,
		.control_offset	= REG_GPIO_SIH_CTRL,
		.set_cor	= true,
		.bits		= TWL4030_GPIO_MAX,
		.bytes_ixr	= 3,
		/* Note: *all* of these IRQs default to no-trigger */
		.edr_offset	= REG_GPIO_EDR1,
		.bytes_edr	= 5,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= REG_GPIO_ISR1A,
			.imr_offset	= REG_GPIO_IMR1A,
		}, {
			.isr_offset	= REG_GPIO_ISR1B,
			.imr_offset	= REG_GPIO_IMR1B,
		}, },
	},
	[1] = {
		.name		= "keypad",
		.set_cor	= true,
		SIH_INITIALIZER(KEYPAD_KEYP, 4)
	},
	[2] = {
		.name		= "bci",
		.module		= TWL4030_MODULE_INTERRUPTS,
		.control_offset	= TWL4030_INTERRUPTS_BCISIHCTRL,
		.set_cor	= true,
		.bits		= 12,
		.bytes_ixr	= 2,
		.edr_offset	= TWL4030_INTERRUPTS_BCIEDR1,
		/* Note: most of these IRQs default to no-trigger */
		.bytes_edr	= 3,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1A,
			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1A,
		}, {
			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1B,
			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1B,
		}, },
	},
	[3] = {
		.name		= "madc",
		SIH_INITIALIZER(MADC, 4)
	},
	[4] = {
		/* USB doesn't use the same SIH organization */
		.name		= "usb",
	},
	[5] = {
		.name		= "power",
		.set_cor	= true,
		SIH_INITIALIZER(INT_PWR, 8)
	},
	/* there are no SIH modules #6 or #7 ... */
};
/* SIH bank descriptions for twl5031; entries 0-5 parallel the twl4030
 * table, with the BCI register map changed and ECI/DBI + audio added. */
static const struct sih sih_modules_twl5031[8] = {
	[0] = {
		.name		= "gpio",
		.module		= TWL4030_MODULE_GPIO,
		.control_offset	= REG_GPIO_SIH_CTRL,
		.set_cor	= true,
		.bits		= TWL4030_GPIO_MAX,
		.bytes_ixr	= 3,
		/* Note: *all* of these IRQs default to no-trigger */
		.edr_offset	= REG_GPIO_EDR1,
		.bytes_edr	= 5,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= REG_GPIO_ISR1A,
			.imr_offset	= REG_GPIO_IMR1A,
		}, {
			.isr_offset	= REG_GPIO_ISR1B,
			.imr_offset	= REG_GPIO_IMR1B,
		}, },
	},
	[1] = {
		.name		= "keypad",
		.set_cor	= true,
		SIH_INITIALIZER(KEYPAD_KEYP, 4)
	},
	[2] = {
		.name		= "bci",
		.module		= TWL5031_MODULE_INTERRUPTS,
		.control_offset	= TWL5031_INTERRUPTS_BCISIHCTRL,
		.bits		= 7,
		.bytes_ixr	= 1,
		.edr_offset	= TWL5031_INTERRUPTS_BCIEDR1,
		/* Note: most of these IRQs default to no-trigger */
		.bytes_edr	= 2,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= TWL5031_INTERRUPTS_BCIISR1,
			.imr_offset	= TWL5031_INTERRUPTS_BCIIMR1,
		}, {
			.isr_offset	= TWL5031_INTERRUPTS_BCIISR2,
			.imr_offset	= TWL5031_INTERRUPTS_BCIIMR2,
		}, },
	},
	[3] = {
		.name		= "madc",
		SIH_INITIALIZER(MADC, 4)
	},
	[4] = {
		/* USB doesn't use the same SIH organization */
		.name		= "usb",
	},
	[5] = {
		.name		= "power",
		.set_cor	= true,
		SIH_INITIALIZER(INT_PWR, 8)
	},
	[6] = {
		/*
		 * ECI/DBI doesn't use the same SIH organization.
		 * For example, it supports only one interrupt output line.
		 * That is, the interrupts are seen on both INT1 and INT2 lines.
		 */
		.name		= "eci_dbi",
		.module		= TWL5031_MODULE_ACCESSORY,
		.bits		= 9,
		.bytes_ixr	= 2,
		.irq_lines	= 1,
		.mask = { {
			.isr_offset	= TWL5031_ACIIDR_LSB,
			.imr_offset	= TWL5031_ACIIMR_LSB,
		}, },
	},
	[7] = {
		/* Audio accessory */
		.name		= "audio",
		.module		= TWL5031_MODULE_ACCESSORY,
		.control_offset	= TWL5031_ACCSIHCTRL,
		.bits		= 2,
		.bytes_ixr	= 1,
		.edr_offset	= TWL5031_ACCEDR1,
		/* Note: most of these IRQs default to no-trigger */
		.bytes_edr	= 1,
		.irq_lines	= 2,
		.mask = { {
			.isr_offset	= TWL5031_ACCISR1,
			.imr_offset	= TWL5031_ACCIMR1,
		}, {
			.isr_offset	= TWL5031_ACCISR2,
			.imr_offset	= TWL5031_ACCIMR2,
		}, },
	},
};
#undef TWL4030_MODULE_KEYPAD_KEYP
#undef TWL4030_MODULE_INT_PWR
#undef TWL4030_INT_PWR_EDR
/*----------------------------------------------------------------------*/
static unsigned twl4030_irq_base;
static struct completion irq_event;
/*
* This thread processes interrupts reported by the Primary Interrupt Handler.
*/
static int twl4030_irq_thread(void *data)
{
	long irq = (long)data;
	static unsigned i2c_errors;
	static const unsigned max_i2c_errors = 100;

	current->flags |= PF_NOFREEZE;

	while (!kthread_should_stop()) {
		int ret;
		int module_irq;
		u8 pih_isr;

		/* Wait for IRQ, then read PIH irq status (also blocking) */
		wait_for_completion_interruptible(&irq_event);

		ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
				      REG_PIH_ISR_P1);
		if (ret) {
			pr_warning("twl4030: I2C error %d reading PIH ISR\n",
					ret);
			/* give up after repeated bus failures so we don't
			 * spin forever on a dead I2C link */
			if (++i2c_errors >= max_i2c_errors) {
				printk(KERN_ERR "Maximum I2C error count"
						" exceeded. Terminating %s.\n",
						__func__);
				break;
			}
			complete(&irq_event);
			continue;
		}

		/* these handlers deal with the relevant SIH irq status;
		 * one nested IRQ is dispatched per set PIH_ISR bit */
		local_irq_disable();
		for (module_irq = twl4030_irq_base;
				pih_isr;
				pih_isr >>= 1, module_irq++) {
			if (pih_isr & 0x1)
				generic_handle_irq(module_irq);
		}
		local_irq_enable();

		/* re-arm the line that handle_twl4030_pih() masked */
		enable_irq(irq);
	}

	return 0;
}
/*
* handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt.
* This is a chained interrupt, so there is no desc->action method for it.
* Now we need to query the interrupt controller in the twl4030 to determine
* which module is generating the interrupt request. However, we can't do i2c
* transactions in interrupt context, so we must defer that work to a kernel
* thread. All we do here is acknowledge and mask the interrupt and wakeup
* the kernel thread.
*/
static irqreturn_t handle_twl4030_pih(int irq, void *devid)
{
	/* Acknowledge, clear *AND* mask the interrupt... */
	disable_irq_nosync(irq);
	/* ...then wake twl4030_irq_thread(), which does the sleeping I2C
	 * work and re-enables the line when finished. */
	complete(devid);
	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
/*
* twl4030_init_sih_modules() ... start from a known state where no
* IRQs will be coming in, and where we can quickly enable them then
* handle them as they arrive. Mask all IRQs: maybe init SIH_CTRL.
*
* NOTE: we don't touch EDR registers here; they stay with hardware
* defaults or whatever the last value was. Note that when both EDR
* bits for an IRQ are clear, that's as if its IMR bit is set...
*/
static int twl4030_init_sih_modules(unsigned line)
{
	const struct sih *sih;
	u8 buf[4];
	int i;
	int status;

	/* line 0 == int1_n signal; line 1 == int2_n signal */
	if (line > 1)
		return -EINVAL;

	/* remember which line we service; the irq_chip workers use it */
	irq_line = line;

	/* disable all interrupts on our line */
	memset(buf, 0xff, sizeof buf);
	sih = sih_modules;
	for (i = 0; i < nr_sih_modules; i++, sih++) {

		/* skip USB -- it's funky */
		if (!sih->bytes_ixr)
			continue;

		/* Not all the SIH modules support multiple interrupt lines */
		if (sih->irq_lines <= line)
			continue;

		status = twl_i2c_write(sih->module, buf,
				sih->mask[line].imr_offset, sih->bytes_ixr);
		if (status < 0)
			pr_err("twl4030: err %d initializing %s %s\n",
					status, sih->name, "IMR");

		/* Maybe disable "exclusive" mode; buffer second pending irq;
		 * set Clear-On-Read (COR) bit.
		 *
		 * NOTE that sometimes COR polarity is documented as being
		 * inverted:  for MADC, COR=1 means "clear on write".
		 * And for PWR_INT it's not documented...
		 */
		if (sih->set_cor) {
			status = twl_i2c_write_u8(sih->module,
					TWL4030_SIH_CTRL_COR_MASK,
					sih->control_offset);
			if (status < 0)
				pr_err("twl4030: err %d initializing %s %s\n",
						status, sih->name, "SIH_CTRL");
		}
	}

	sih = sih_modules;
	for (i = 0; i < nr_sih_modules; i++, sih++) {
		u8 rxbuf[4];
		int j;

		/* skip USB */
		if (!sih->bytes_ixr)
			continue;

		/* Not all the SIH modules support multiple interrupt lines */
		if (sih->irq_lines <= line)
			continue;

		/* Clear pending interrupt status.  Either the read was
		 * enough, or we need to write those bits.  Repeat, in
		 * case an IRQ is pending (PENDDIS=0) ... that's not
		 * uncommon with PWR_INT.PWRON.
		 */
		for (j = 0; j < 2; j++) {
			status = twl_i2c_read(sih->module, rxbuf,
				sih->mask[line].isr_offset, sih->bytes_ixr);
			if (status < 0)
				pr_err("twl4030: err %d initializing %s %s\n",
					status, sih->name, "ISR");

			/* non-COR modules need an explicit write of all-ones
			 * to clear; buf still holds 0xff from above */
			if (!sih->set_cor)
				status = twl_i2c_write(sih->module, buf,
					sih->mask[line].isr_offset,
					sih->bytes_ixr);
			/* else COR=1 means read sufficed.
			 * (for most SIH modules...)
			 */
		}
	}

	return 0;
}
/* Make a freshly configured IRQ requestable by drivers. */
static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
	 */
	set_irq_flags(irq, IRQF_VALID);
#else
	/* same effect on other architectures */
	irq_set_noprobe(irq);
#endif
}
/*----------------------------------------------------------------------*/
static DEFINE_SPINLOCK(sih_agent_lock);
static struct workqueue_struct *wq;
/* Per-SIH-bank state shared between the irq_chip callbacks and the
 * workqueue items that perform the actual (sleeping) I2C updates. */
struct sih_agent {
	int			irq_base;
	const struct sih	*sih;

	u32			imr;	/* shadow of the chip's IMR */
	bool			imr_change_pending;
	struct work_struct	mask_work;

	u32			edge_change;	/* IRQ bits needing EDR rewrite */
	struct work_struct	edge_work;
};
/* Workqueue handler: flush the shadow IMR out to the chip over I2C. */
static void twl4030_sih_do_mask(struct work_struct *work)
{
	struct sih_agent *agent;
	const struct sih *sih;
	union {
		u8	bytes[4];
		u32	word;
	} imr;
	int status;

	agent = container_of(work, struct sih_agent, mask_work);

	/* see what work we have */
	spin_lock_irq(&sih_agent_lock);
	if (agent->imr_change_pending) {
		sih = agent->sih;
		/* byte[0] gets overwritten as we write ... */
		imr.word = cpu_to_le32(agent->imr << 8);
		agent->imr_change_pending = false;
	} else
		sih = NULL;
	spin_unlock_irq(&sih_agent_lock);
	if (!sih)
		return;

	/* write the whole mask ... simpler than subsetting it */
	status = twl_i2c_write(sih->module, imr.bytes,
			sih->mask[irq_line].imr_offset, sih->bytes_ixr);
	if (status)
		pr_err("twl4030: %s, %s --> %d\n", __func__,
				"write", status);
}
/* Workqueue handler: rewrite the EDR (edge detect) registers for every
 * IRQ whose trigger type changed since the last flush. */
static void twl4030_sih_do_edge(struct work_struct *work)
{
	struct sih_agent *agent;
	const struct sih *sih;
	u8 bytes[6];
	u32 edge_change;
	int status;

	agent = container_of(work, struct sih_agent, edge_work);

	/* see what work we have */
	spin_lock_irq(&sih_agent_lock);
	edge_change = agent->edge_change;
	agent->edge_change = 0;
	sih = edge_change ? agent->sih : NULL;
	spin_unlock_irq(&sih_agent_lock);
	if (!sih)
		return;

	/* Read, reserving first byte for write scratch. Yes, this
	 * could be cached for some speedup ... but be careful about
	 * any processor on the other IRQ line, EDR registers are
	 * shared.
	 */
	status = twl_i2c_read(sih->module, bytes + 1,
			sih->edr_offset, sih->bytes_edr);
	if (status) {
		pr_err("twl4030: %s, %s --> %d\n", __func__,
				"read", status);
		return;
	}

	/* Modify only the bits we know must change */
	while (edge_change) {
		int	i = fls(edge_change) - 1;
		struct irq_data *idata = irq_get_irq_data(i + agent->irq_base);
		/* two EDR bits per IRQ: bit 0 = falling, bit 1 = rising */
		int	byte = 1 + (i >> 2);
		int	off = (i & 0x3) * 2;
		unsigned int	type;

		bytes[byte] &= ~(0x03 << off);

		type = irqd_get_trigger_type(idata);
		if (type & IRQ_TYPE_EDGE_RISING)
			bytes[byte] |= BIT(off + 1);
		if (type & IRQ_TYPE_EDGE_FALLING)
			bytes[byte] |= BIT(off + 0);

		edge_change &= ~BIT(i);
	}

	/* Write */
	status = twl_i2c_write(sih->module, bytes,
			sih->edr_offset, sih->bytes_edr);
	if (status)
		pr_err("twl4030: %s, %s --> %d\n", __func__,
				"write", status);
}
/*----------------------------------------------------------------------*/
/*
* All irq_chip methods get issued from code holding irq_desc[irq].lock,
* which can't perform the underlying I2C operations (because they sleep).
* So we must hand them off to a thread (workqueue) and cope with asynch
* completion, potentially including some re-ordering, of these requests.
*/
static void twl4030_sih_mask(struct irq_data *data)
{
	/* Record the bit in the shadow IMR and defer the sleeping I2C
	 * write to the workqueue (we're called with irq_desc locked). */
	struct sih_agent *agent = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	spin_lock_irqsave(&sih_agent_lock, flags);
	agent->imr |= BIT(data->irq - agent->irq_base);
	agent->imr_change_pending = true;
	queue_work(wq, &agent->mask_work);
	spin_unlock_irqrestore(&sih_agent_lock, flags);
}
static void twl4030_sih_unmask(struct irq_data *data)
{
	/* Clear the bit in the shadow IMR and defer the sleeping I2C
	 * write to the workqueue (we're called with irq_desc locked). */
	struct sih_agent *agent = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	spin_lock_irqsave(&sih_agent_lock, flags);
	agent->imr &= ~BIT(data->irq - agent->irq_base);
	agent->imr_change_pending = true;
	queue_work(wq, &agent->mask_work);
	spin_unlock_irqrestore(&sih_agent_lock, flags);
}
/* irq_chip .irq_set_type: only edge triggers are supported; the actual
 * EDR register update is deferred to the edge workqueue item. */
static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
{
	struct sih_agent *sih = irq_data_get_irq_chip_data(data);
	unsigned long flags;

	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		return -EINVAL;

	spin_lock_irqsave(&sih_agent_lock, flags);
	if (irqd_get_trigger_type(data) != trigger) {
		sih->edge_change |= BIT(data->irq - sih->irq_base);
		queue_work(wq, &sih->edge_work);
	}
	spin_unlock_irqrestore(&sih_agent_lock, flags);
	return 0;
}
/* irq_chip shared by all SIH banks; .irq_ack is filled in from
 * dummy_irq_chip during twl4030_init_irq(). */
static struct irq_chip twl4030_sih_irq_chip = {
	.name		= "twl4030",
	.irq_mask	= twl4030_sih_mask,
	.irq_unmask	= twl4030_sih_unmask,
	.irq_set_type	= twl4030_sih_set_type,
};
/*----------------------------------------------------------------------*/
/* Read this SIH's ISR bytes on the active line (which also acks them
 * when clear-on-read is enabled).  Returns the ISR contents as a
 * host-order bitmask, or a negative errno. */
static inline int sih_read_isr(const struct sih *sih)
{
	int status;
	union {
		u8 bytes[4];
		u32 word;
	} isr;

	/* FIXME need retry-on-error ... */

	isr.word = 0;
	status = twl_i2c_read(sih->module, isr.bytes,
			sih->mask[irq_line].isr_offset, sih->bytes_ixr);

	return (status < 0) ? status : le32_to_cpu(isr.word);
}
/*
* Generic handler for SIH interrupts ... we "know" this is called
* in task context, with IRQs enabled.
*/
static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
{
	struct sih_agent *agent = irq_get_handler_data(irq);
	const struct sih *sih = agent->sih;
	int isr;

	/* reading ISR acks the IRQs, using clear-on-read mode */
	local_irq_enable();
	isr = sih_read_isr(sih);
	local_irq_disable();

	if (isr < 0) {
		pr_err("twl4030: %s SIH, read ISR error %d\n",
			sih->name, isr);
		/* REVISIT: recover; eventually mask it all, etc */
		return;
	}

	/* dispatch one nested interrupt per set bit, highest first */
	while (isr) {
		irq = fls(isr);
		irq--;
		isr &= ~BIT(irq);

		if (irq < sih->bits)
			generic_handle_irq(agent->irq_base + irq);
		else
			pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
				sih->name, irq);
	}
}
static unsigned twl4030_irq_next;
/* returns the first IRQ used by this SIH bank,
* or negative errno
*/
int twl4030_sih_setup(int module)
{
	int			sih_mod;
	const struct sih	*sih = NULL;
	struct sih_agent	*agent;
	int			i, irq;
	int			status = -EINVAL;
	unsigned		irq_base = twl4030_irq_next;

	/* only support modules with standard clear-on-read for now */
	for (sih_mod = 0, sih = sih_modules;
			sih_mod < nr_sih_modules;
			sih_mod++, sih++) {
		if (sih->module == module && sih->set_cor) {
			/* refuse banks that would run past NR_IRQS */
			if (!WARN((irq_base + sih->bits) > NR_IRQS,
					"irq %d for %s too big\n",
					irq_base + sih->bits,
					sih->name))
				status = 0;
			break;
		}
	}
	if (status < 0)
		return status;

	agent = kzalloc(sizeof *agent, GFP_KERNEL);
	if (!agent)
		return -ENOMEM;

	status = 0;

	agent->irq_base = irq_base;
	agent->sih = sih;
	/* start with every interrupt masked in the shadow IMR */
	agent->imr = ~0;
	INIT_WORK(&agent->mask_work, twl4030_sih_do_mask);
	INIT_WORK(&agent->edge_work, twl4030_sih_do_edge);

	for (i = 0; i < sih->bits; i++) {
		irq = irq_base + i;

		irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip,
					 handle_edge_irq);
		irq_set_chip_data(irq, agent);
		activate_irq(irq);
	}

	status = irq_base;
	twl4030_irq_next += i;

	/* replace generic PIH handler (handle_simple_irq) */
	irq = sih_mod + twl4030_irq_base;
	irq_set_handler_data(irq, agent);
	irq_set_chained_handler(irq, handle_twl4030_sih);

	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
			irq, irq_base, twl4030_irq_next - 1);

	return status;
}
/* FIXME need a call to reverse twl4030_sih_setup() ... */
/*----------------------------------------------------------------------*/
/* FIXME pass in which interrupt line we'll use ... */
#define twl_irq_line 0
/*
 * Set up the PIH demux IRQs, the PWR_INT SIH bank, and the worker
 * thread that performs the sleeping I2C reads.  Returns 0 on success
 * or a negative errno, unwinding everything on failure.
 */
int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
{

	static struct irq_chip	twl4030_irq_chip;
	int			status;
	int			i;
	struct task_struct	*task;

	/*
	 * Mask and clear all TWL4030 interrupts since initially we do
	 * not have any TWL4030 module interrupt handlers present
	 */
	status = twl4030_init_sih_modules(twl_irq_line);
	if (status < 0)
		return status;

	wq = create_singlethread_workqueue("twl4030-irqchip");
	if (!wq) {
		pr_err("twl4030: workqueue FAIL\n");
		return -ESRCH;
	}

	twl4030_irq_base = irq_base;

	/* install an irq handler for each of the SIH modules;
	 * clone dummy irq_chip since PIH can't *do* anything
	 */
	twl4030_irq_chip = dummy_irq_chip;
	twl4030_irq_chip.name = "twl4030";

	twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;

	for (i = irq_base; i < irq_end; i++) {
		irq_set_chip_and_handler(i, &twl4030_irq_chip,
					 handle_simple_irq);
		activate_irq(i);
	}
	twl4030_irq_next = i;
	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
			irq_num, irq_base, twl4030_irq_next - 1);

	/* ... and the PWR_INT module ... */
	status = twl4030_sih_setup(TWL4030_MODULE_INT);
	if (status < 0) {
		pr_err("twl4030: sih_setup PWR INT --> %d\n", status);
		goto fail;
	}

	/* install an irq handler to demultiplex the TWL4030 interrupt */
	init_completion(&irq_event);

	status = request_irq(irq_num, handle_twl4030_pih, IRQF_DISABLED,
			     "TWL4030-PIH", &irq_event);
	if (status < 0) {
		pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
		goto fail_rqirq;
	}

	task = kthread_run(twl4030_irq_thread, (void *)(long)irq_num,
			   "twl4030-irq");
	if (IS_ERR(task)) {
		pr_err("twl4030: could not create irq %d thread!\n", irq_num);
		status = PTR_ERR(task);
		goto fail_kthread;
	}
	return status;
fail_kthread:
	free_irq(irq_num, &irq_event);
fail_rqirq:
	/* clean up twl4030_sih_setup */
fail:
	/* detach every demux IRQ we installed above before bailing out */
	for (i = irq_base; i < irq_end; i++)
		irq_set_chip_and_handler(i, NULL, NULL);
	destroy_workqueue(wq);
	wq = NULL;
	return status;
}
int twl4030_exit_irq(void)
{
	/* FIXME undo twl_init_irq() */
	/* Proper teardown is not implemented; refuse whenever interrupts
	 * were ever set up. */
	if (!twl4030_irq_base)
		return 0;

	pr_err("twl4030: can't yet clean up IRQs?\n");
	return -ENOSYS;
}
int twl4030_init_chip_irq(const char *chip)
{
	/* Select the SIH table matching the companion-chip variant; every
	 * chip other than the twl5031 uses the twl4030 layout. */
	int is_5031 = !strcmp(chip, "twl5031");

	sih_modules = is_5031 ? sih_modules_twl5031 : sih_modules_twl4030;
	nr_sih_modules = is_5031 ? ARRAY_SIZE(sih_modules_twl5031)
				 : ARRAY_SIZE(sih_modules_twl4030);

	return 0;
}
/*
* drivers/media/video/tcm825x.c
*
* TCM825X camera sensor driver.
*
* Copyright (C) 2007 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@nokia.com>
*
* Based on code from David Cohen <david.cohen@indt.org.br>
*
* This driver was based on ov9640 sensor driver from MontaVista
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/i2c.h>
#include <media/v4l2-int-device.h>
#include "tcm825x.h"
/*
* The sensor has two fps modes: the lower one just gives half the fps
* at the same xclk than the high one.
*/
#define MAX_FPS 30
#define MIN_FPS 8
#define MAX_HALF_FPS (MAX_FPS / 2)
#define HIGH_FPS_MODE_LOWER_LIMIT 14
#define DEFAULT_FPS MAX_HALF_FPS
/* Driver state: board hooks, v4l2-int-device handle, I2C client plus
 * the currently negotiated pixel format and frame interval. */
struct tcm825x_sensor {
	const struct tcm825x_platform_data *platform_data;
	struct v4l2_int_device *v4l2_int_device;
	struct i2c_client *i2c_client;
	struct v4l2_pix_format pix;
	struct v4l2_fract timeperframe;
};
/* list of image formats supported by TCM825X sensor */
static const struct v4l2_fmtdesc tcm825x_formats[] = {
	{
		/* NOTE(review): the description says YUYV while the fourcc
		 * is UYVY -- confirm which byte order the sensor really
		 * emits before changing either field. */
		.description = "YUYV (YUV 4:2:2), packed",
		.pixelformat = V4L2_PIX_FMT_UYVY,
	}, {
		/* Note:  V4L2 defines RGB565 as:
		 *
		 *      Byte 0                    Byte 1
		 *      g2 g1 g0 r4 r3 r2 r1 r0   b4 b3 b2 b1 b0 g5 g4 g3
		 *
		 * We interpret RGB565 as:
		 *
		 *      Byte 0                    Byte 1
		 *      g2 g1 g0 b4 b3 b2 b1 b0   r4 r3 r2 r1 r0 g5 g4 g3
		 */
		.description = "RGB565, le",
		.pixelformat = V4L2_PIX_FMT_RGB565,
	},
};
#define TCM825X_NUM_CAPTURE_FORMATS ARRAY_SIZE(tcm825x_formats)
/*
* TCM825X register configuration for all combinations of pixel format and
* image size
*/
/* PICSIZ values selecting each supported frame size. */
static const struct tcm825x_reg subqcif	=	{ 0x20, TCM825X_PICSIZ };
static const struct tcm825x_reg qcif	=	{ 0x18, TCM825X_PICSIZ };
static const struct tcm825x_reg cif	=	{ 0x14, TCM825X_PICSIZ };
static const struct tcm825x_reg qqvga	=	{ 0x0c, TCM825X_PICSIZ };
static const struct tcm825x_reg qvga	=	{ 0x04, TCM825X_PICSIZ };
static const struct tcm825x_reg vga	=	{ 0x00, TCM825X_PICSIZ };

/* PICFMT values selecting the output pixel format. */
static const struct tcm825x_reg yuv422	=	{ 0x00, TCM825X_PICFMT };
static const struct tcm825x_reg rgb565	=	{ 0x02, TCM825X_PICFMT };
/* Our own specific controls */
#define V4L2_CID_ALC V4L2_CID_PRIVATE_BASE
#define V4L2_CID_H_EDGE_EN V4L2_CID_PRIVATE_BASE + 1
#define V4L2_CID_V_EDGE_EN V4L2_CID_PRIVATE_BASE + 2
#define V4L2_CID_LENS V4L2_CID_PRIVATE_BASE + 3
#define V4L2_CID_MAX_EXPOSURE_TIME V4L2_CID_PRIVATE_BASE + 4
#define V4L2_CID_LAST_PRIV V4L2_CID_MAX_EXPOSURE_TIME
/* Video controls */
/* Control table: each entry pairs the V4L2 queryctrl description with
 * the chip register (and bit position within it) that backs it. */
static struct vcontrol {
	struct v4l2_queryctrl qc;
	u16 reg;
	u16 start_bit;
} video_control[] = {
	{
		{
			.id = V4L2_CID_GAIN,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Gain",
			.minimum = 0,
			.maximum = 63,
			.step = 1,
		},
		.reg = TCM825X_AG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_RED_BALANCE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Red Balance",
			.minimum = 0,
			.maximum = 255,
			.step = 1,
		},
		.reg = TCM825X_MRG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_BLUE_BALANCE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Blue Balance",
			.minimum = 0,
			.maximum = 255,
			.step = 1,
		},
		.reg = TCM825X_MBG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_AUTO_WHITE_BALANCE,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Auto White Balance",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_AWBSW,
		.start_bit = 7,
	},
	{
		{
			.id = V4L2_CID_EXPOSURE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Exposure Time",
			.minimum = 0,
			.maximum = 0x1fff,
			.step = 1,
		},
		.reg = TCM825X_ESRSPD_U,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_HFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Mirror Image",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_H_INV,
		.start_bit = 6,
	},
	{
		{
			.id = V4L2_CID_VFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Vertical Flip",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_V_INV,
		.start_bit = 7,
	},
	/* Private controls */
	{
		{
			.id = V4L2_CID_ALC,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "Auto Luminance Control",
			.minimum = 0,
			.maximum = 1,
			.step = 0,
		},
		.reg = TCM825X_ALCSW,
		.start_bit = 7,
	},
	{
		{
			.id = V4L2_CID_H_EDGE_EN,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Horizontal Edge Enhancement",
			.minimum = 0,
			.maximum = 0xff,
			.step = 1,
		},
		.reg = TCM825X_HDTG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_V_EDGE_EN,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Vertical Edge Enhancement",
			.minimum = 0,
			.maximum = 0xff,
			.step = 1,
		},
		.reg = TCM825X_VDTG,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_LENS,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Lens Shading Compensation",
			.minimum = 0,
			.maximum = 0x3f,
			.step = 1,
		},
		.reg = TCM825X_LENS,
		.start_bit = 0,
	},
	{
		{
			.id = V4L2_CID_MAX_EXPOSURE_TIME,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "Maximum Exposure Time",
			.minimum = 0,
			.maximum = 0x3,
			.step = 1,
		},
		.reg = TCM825X_ESRLIM,
		.start_bit = 5,
	},
};
/* Lookup tables indexed by the image-size / pixel-format enums. */
static const struct tcm825x_reg *tcm825x_siz_reg[NUM_IMAGE_SIZES] =
{ &subqcif, &qqvga, &qcif, &qvga, &cif, &vga };

static const struct tcm825x_reg *tcm825x_fmt_reg[NUM_PIXEL_FORMATS] =
{ &yuv422, &rgb565 };
/*
* Read a value from a register in an TCM825X sensor device. The value is
* returned in 'val'.
* Returns zero if successful, or non-zero otherwise.
*/
static int tcm825x_read_reg(struct i2c_client *client, int reg)
{
	int err;
	struct i2c_msg msg[2];
	u8 reg_buf, data_buf = 0;

	if (!client->adapter)
		return -ENODEV;

	/* message 0: write the register address ... */
	msg[0].addr = client->addr;
	msg[0].flags = 0;
	msg[0].len = 1;
	/* Fix: source had the mis-encoded token "®_buf" here ("&reg"
	 * turned into the (R) sign), which is not valid C. */
	msg[0].buf = &reg_buf;
	/* ... message 1: read the register contents back */
	msg[1].addr = client->addr;
	msg[1].flags = I2C_M_RD;
	msg[1].len = 1;
	msg[1].buf = &data_buf;

	reg_buf = reg;

	err = i2c_transfer(client->adapter, msg, 2);
	if (err < 0)
		return err;
	/* success: return the 8-bit register value */
	return data_buf;
}
/*
 * Write a value to a register in an TCM825X sensor device.
 *
 * @client: i2c client of the sensor
 * @reg: register address
 * @val: value to store
 *
 * Returns zero if successful, or a negative errno otherwise.
 */
static int tcm825x_write_reg(struct i2c_client *client, u8 reg, u8 val)
{
	unsigned char buf[2];
	struct i2c_msg msg;
	int rc;

	if (!client->adapter)
		return -ENODEV;

	/* Single write transaction: register address followed by data. */
	buf[0] = reg;
	buf[1] = val;

	msg.addr = client->addr;
	msg.flags = 0;
	msg.len = 2;
	msg.buf = buf;

	rc = i2c_transfer(client->adapter, &msg, 1);
	return rc < 0 ? rc : 0;
}
/*
 * Read-modify-write helper: update only the bits of @reg selected by
 * @mask, leaving the remaining bits as they currently are on the chip.
 *
 * Returns zero on success or a negative errno from the I2C layer.
 */
static int __tcm825x_write_reg_mask(struct i2c_client *client,
				    u8 reg, u8 val, u8 mask)
{
	int old;

	/* need to do read - modify - write */
	old = tcm825x_read_reg(client, reg);
	if (old < 0)
		return old;

	/* Merge the masked new bits into the preserved old bits. */
	return tcm825x_write_reg(client, reg,
				 (old & ~mask) | (val & mask));
}
/*
 * Convenience wrapper for __tcm825x_write_reg_mask(): the register
 * definitions encode address and bit mask in one value, unpacked here
 * with TCM825X_ADDR() and TCM825X_MASK().
 */
#define tcm825x_write_reg_mask(client, regmask, val) \
	__tcm825x_write_reg_mask(client, TCM825X_ADDR((regmask)), val, \
				 TCM825X_MASK((regmask)))
/*
 * Initialize a list of TCM825X registers.
 * The list of registers is terminated by the pair of values
 * { TCM825X_REG_TERM, TCM825X_VAL_TERM }.
 * Returns zero if successful, or non-zero otherwise.
 */
static int tcm825x_write_default_regs(struct i2c_client *client,
				      const struct tcm825x_reg *reglist)
{
	const struct tcm825x_reg *r;
	int rc;

	for (r = reglist;
	     !(r->reg == TCM825X_REG_TERM && r->val == TCM825X_VAL_TERM);
	     r++) {
		rc = tcm825x_write_reg(client, r->reg, r->val);
		if (rc) {
			dev_err(&client->dev, "register writing failed\n");
			return rc;
		}
	}

	return 0;
}
static struct vcontrol *find_vctrl(int id)
{
int i;
if (id < V4L2_CID_BASE)
return NULL;
for (i = 0; i < ARRAY_SIZE(video_control); i++)
if (video_control[i].qc.id == id)
return &video_control[i];
return NULL;
}
/*
* Find the best match for a requested image capture size. The best match
* is chosen as the nearest match that has the same number or fewer pixels
* as the requested size, or the smallest image size if the requested size
* has fewer pixels than the smallest image.
*/
static enum image_size tcm825x_find_size(struct v4l2_int_device *s,
unsigned int width,
unsigned int height)
{
enum image_size isize;
unsigned long pixels = width * height;
struct tcm825x_sensor *sensor = s->priv;
for (isize = subQCIF; isize < VGA; isize++) {
if (tcm825x_sizes[isize + 1].height
* tcm825x_sizes[isize + 1].width > pixels) {
dev_dbg(&sensor->i2c_client->dev, "size %d\n", isize);
return isize;
}
}
dev_dbg(&sensor->i2c_client->dev, "format default VGA\n");
return VGA;
}
/*
* Configure the TCM825X for current image size, pixel format, and
* frame period. fper is the frame period (in seconds) expressed as a
* fraction. Returns zero if successful, or non-zero otherwise. The
* actual frame period is returned in fper.
*/
static int tcm825x_configure(struct v4l2_int_device *s)
{
struct tcm825x_sensor *sensor = s->priv;
struct v4l2_pix_format *pix = &sensor->pix;
enum image_size isize = tcm825x_find_size(s, pix->width, pix->height);
struct v4l2_fract *fper = &sensor->timeperframe;
enum pixel_format pfmt;
int err;
u32 tgt_fps;
u8 val;
/* common register initialization */
err = tcm825x_write_default_regs(
sensor->i2c_client, sensor->platform_data->default_regs());
if (err)
return err;
/* configure image size */
val = tcm825x_siz_reg[isize]->val;
dev_dbg(&sensor->i2c_client->dev,
"configuring image size %d\n", isize);
err = tcm825x_write_reg_mask(sensor->i2c_client,
tcm825x_siz_reg[isize]->reg, val);
if (err)
return err;
/* configure pixel format */
switch (pix->pixelformat) {
default:
case V4L2_PIX_FMT_RGB565:
pfmt = RGB565;
break;
case V4L2_PIX_FMT_UYVY:
pfmt = YUV422;
break;
}
dev_dbg(&sensor->i2c_client->dev,
"configuring pixel format %d\n", pfmt);
val = tcm825x_fmt_reg[pfmt]->val;
err = tcm825x_write_reg_mask(sensor->i2c_client,
tcm825x_fmt_reg[pfmt]->reg, val);
if (err)
return err;
/*
* For frame rate < 15, the FPS reg (addr 0x02, bit 7) must be
* set. Frame rate will be halved from the normal.
*/
tgt_fps = fper->denominator / fper->numerator;
if (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) {
val = tcm825x_read_reg(sensor->i2c_client, 0x02);
val |= 0x80;
tcm825x_write_reg(sensor->i2c_client, 0x02, val);
}
return 0;
}
/* VIDIOC_QUERYCTRL: report the static description of a supported control. */
static int ioctl_queryctrl(struct v4l2_int_device *s,
			   struct v4l2_queryctrl *qc)
{
	struct vcontrol *ctrl = find_vctrl(qc->id);

	if (!ctrl)
		return -EINVAL;

	*qc = ctrl->qc;

	return 0;
}
/*
 * VIDIOC_G_CTRL handler: read a control's current value from the chip.
 * The value is extracted from the register with the mask/shift stored
 * in the control table; exposure spans two registers and is handled
 * separately.
 */
static int ioctl_g_ctrl(struct v4l2_int_device *s,
			struct v4l2_control *vc)
{
	struct tcm825x_sensor *sensor = s->priv;
	struct i2c_client *client = sensor->i2c_client;
	int val, r;
	struct vcontrol *lvc;
	/* exposure time is special, spread across 2 registers */
	if (vc->id == V4L2_CID_EXPOSURE) {
		int val_lower, val_upper;
		val_upper = tcm825x_read_reg(client,
					     TCM825X_ADDR(TCM825X_ESRSPD_U));
		if (val_upper < 0)
			return val_upper;
		val_lower = tcm825x_read_reg(client,
					     TCM825X_ADDR(TCM825X_ESRSPD_L));
		if (val_lower < 0)
			return val_lower;
		/* 13-bit value: 5 bits from the upper reg, 8 from the lower. */
		vc->value = ((val_upper & 0x1f) << 8) | (val_lower);
		return 0;
	}
	lvc = find_vctrl(vc->id);
	if (lvc == NULL)
		return -EINVAL;
	r = tcm825x_read_reg(client, TCM825X_ADDR(lvc->reg));
	if (r < 0)
		return r;
	val = r & TCM825X_MASK(lvc->reg);
	val >>= lvc->start_bit;
	/*
	 * NOTE(review): val comes from masking/shifting a register read
	 * and looks like it cannot be negative here — confirm against
	 * TCM825X_MASK() before removing this check.
	 */
	if (val < 0)
		return val;
	/* Flips are reported relative to the board's mounting orientation. */
	if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
		val ^= sensor->platform_data->is_upside_down();
	vc->value = val;
	return 0;
}
/*
 * VIDIOC_S_CTRL handler: write a control's value into the chip using the
 * register/shift information from the control table. Exposure is split
 * across the ESRSPD upper/lower registers.
 */
static int ioctl_s_ctrl(struct v4l2_int_device *s,
			struct v4l2_control *vc)
{
	struct tcm825x_sensor *sensor = s->priv;
	struct i2c_client *client = sensor->i2c_client;
	struct vcontrol *lvc;
	int val = vc->value;
	/* exposure time is special, spread across 2 registers */
	if (vc->id == V4L2_CID_EXPOSURE) {
		int val_lower, val_upper;
		val_lower = val & TCM825X_MASK(TCM825X_ESRSPD_L);
		val_upper = (val >> 8) & TCM825X_MASK(TCM825X_ESRSPD_U);
		if (tcm825x_write_reg_mask(client,
					   TCM825X_ESRSPD_U, val_upper))
			return -EIO;
		if (tcm825x_write_reg_mask(client,
					   TCM825X_ESRSPD_L, val_lower))
			return -EIO;
		return 0;
	}
	lvc = find_vctrl(vc->id);
	if (lvc == NULL)
		return -EINVAL;
	/* Compensate flips for boards with an upside-down sensor. */
	if (vc->id == V4L2_CID_HFLIP || vc->id == V4L2_CID_VFLIP)
		val ^= sensor->platform_data->is_upside_down();
	/* Position the value at its bit offset within the register. */
	val = val << lvc->start_bit;
	if (tcm825x_write_reg_mask(client, lvc->reg, val))
		return -EIO;
	return 0;
}
/* VIDIOC_ENUM_FMT: enumerate the supported capture pixel formats. */
static int ioctl_enum_fmt_cap(struct v4l2_int_device *s,
			      struct v4l2_fmtdesc *fmt)
{
	int index = fmt->index;

	/* Only video capture enumeration is supported. */
	if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (index >= TCM825X_NUM_CAPTURE_FORMATS)
		return -EINVAL;

	fmt->flags = tcm825x_formats[index].flags;
	strlcpy(fmt->description, tcm825x_formats[index].description,
		sizeof(fmt->description));
	fmt->pixelformat = tcm825x_formats[index].pixelformat;

	return 0;
}
/*
 * VIDIOC_TRY_FMT handler: adjust the requested format in @f to the
 * nearest supported size and format without touching the hardware.
 * Unknown pixel formats fall back to format index 0 (UYVY).
 */
static int ioctl_try_fmt_cap(struct v4l2_int_device *s,
			     struct v4l2_format *f)
{
	struct tcm825x_sensor *sensor = s->priv;
	enum image_size isize;
	int ifmt;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	/* Snap the requested size to the closest supported one. */
	isize = tcm825x_find_size(s, pix->width, pix->height);
	dev_dbg(&sensor->i2c_client->dev, "isize = %d num_capture = %lu\n",
		isize, (unsigned long)TCM825X_NUM_CAPTURE_FORMATS);
	pix->width = tcm825x_sizes[isize].width;
	pix->height = tcm825x_sizes[isize].height;
	for (ifmt = 0; ifmt < TCM825X_NUM_CAPTURE_FORMATS; ifmt++)
		if (pix->pixelformat == tcm825x_formats[ifmt].pixelformat)
			break;
	if (ifmt == TCM825X_NUM_CAPTURE_FORMATS)
		ifmt = 0;	/* Default = YUV 4:2:2 */
	pix->pixelformat = tcm825x_formats[ifmt].pixelformat;
	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = pix->width * TCM825X_BYTES_PER_PIXEL;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->priv = 0;
	dev_dbg(&sensor->i2c_client->dev, "format = 0x%08x\n",
		pix->pixelformat);
	/* Colorspace follows the chosen pixel format. */
	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_UYVY:
	default:
		pix->colorspace = V4L2_COLORSPACE_JPEG;
		break;
	case V4L2_PIX_FMT_RGB565:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		break;
	}
	return 0;
}
/*
 * VIDIOC_S_FMT handler: validate/adjust the format, program the sensor
 * and remember the new format.
 *
 * NOTE(review): tcm825x_configure() reads sensor->pix, which is updated
 * only *after* the call — so the hardware appears to be programmed with
 * the previous format here; confirm intended ordering before changing.
 */
static int ioctl_s_fmt_cap(struct v4l2_int_device *s,
			   struct v4l2_format *f)
{
	struct tcm825x_sensor *sensor = s->priv;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	int rval;
	rval = ioctl_try_fmt_cap(s, f);
	if (rval)
		return rval;
	rval = tcm825x_configure(s);
	sensor->pix = *pix;
	return rval;
}
/* VIDIOC_G_FMT: return the currently configured pixel format. */
static int ioctl_g_fmt_cap(struct v4l2_int_device *s,
			   struct v4l2_format *f)
{
	struct tcm825x_sensor *sensor = s->priv;

	f->fmt.pix = sensor->pix;

	return 0;
}
/* VIDIOC_G_PARM: report the current streaming parameters (frame period). */
static int ioctl_g_parm(struct v4l2_int_device *s,
			struct v4l2_streamparm *a)
{
	struct tcm825x_sensor *sensor = s->priv;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* Clear everything the caller passed in, then fill what we support. */
	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.capture.timeperframe = sensor->timeperframe;

	return 0;
}
/*
 * VIDIOC_S_PARM: set the frame period, clamped to [MIN_FPS, MAX_FPS],
 * then reprogram the sensor. A zero numerator or denominator selects
 * the default frame rate.
 */
static int ioctl_s_parm(struct v4l2_int_device *s,
			struct v4l2_streamparm *a)
{
	struct tcm825x_sensor *sensor = s->priv;
	struct v4l2_fract *tpf = &a->parm.capture.timeperframe;
	u32 fps;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	if (tpf->numerator == 0 || tpf->denominator == 0) {
		tpf->denominator = DEFAULT_FPS;
		tpf->numerator = 1;
	}

	fps = tpf->denominator / tpf->numerator;
	if (fps > MAX_FPS) {
		tpf->denominator = MAX_FPS;
		tpf->numerator = 1;
	} else if (fps < MIN_FPS) {
		tpf->denominator = MIN_FPS;
		tpf->numerator = 1;
	}

	sensor->timeperframe = *tpf;

	return tcm825x_configure(s);
}
/* VIDIOC_S_POWER handler: delegate power control to the board hook. */
static int ioctl_s_power(struct v4l2_int_device *s, int on)
{
	struct tcm825x_sensor *sensor = s->priv;
	return sensor->platform_data->power_set(on);
}
/*
 * Given the image capture format in pix, the nominal frame period in
 * timeperframe, calculate the required xclk frequency.
 *
 * TCM825X input frequency characteristics are:
 * Minimum 11.9 MHz, Typical 24.57 MHz and maximum 25/27 MHz
 */
static int ioctl_g_ifparm(struct v4l2_int_device *s, struct v4l2_ifparm *p)
{
	struct tcm825x_sensor *sensor = s->priv;
	struct v4l2_fract *timeperframe = &sensor->timeperframe;
	u32 tgt_xclk;	/* target xclk */
	u32 tgt_fps;	/* target frames per secound */
	int rval;
	/* Board code fills in the interface parameters first. */
	rval = sensor->platform_data->ifparm(p);
	if (rval)
		return rval;
	tgt_fps = timeperframe->denominator / timeperframe->numerator;
	/*
	 * Scale linearly from the typical clock: 2457 * 10000 = 24.57 MHz
	 * at the full-rate fps limit (MAX_FPS, or MAX_HALF_FPS in the
	 * halved-rate mode used below HIGH_FPS_MODE_LOWER_LIMIT).
	 */
	tgt_xclk = (tgt_fps <= HIGH_FPS_MODE_LOWER_LIMIT) ?
		(2457 * tgt_fps) / MAX_HALF_FPS :
		(2457 * tgt_fps) / MAX_FPS;
	tgt_xclk *= 10000;
	/* Clamp to the sensor's supported xclk range. */
	tgt_xclk = min(tgt_xclk, (u32)TCM825X_XCLK_MAX);
	tgt_xclk = max(tgt_xclk, (u32)TCM825X_XCLK_MIN);
	p->u.bt656.clock_curr = tgt_xclk;
	return 0;
}
/* Ask the board code whether the sensor needs a reset for this buffer. */
static int ioctl_g_needs_reset(struct v4l2_int_device *s, void *buf)
{
	struct tcm825x_sensor *sensor = s->priv;
	return sensor->platform_data->needs_reset(s, buf, &sensor->pix);
}
/* Explicit reset is not supported; always report busy. */
static int ioctl_reset(struct v4l2_int_device *s)
{
	return -EBUSY;
}
/* (Re)program the sensor with the current format and frame period. */
static int ioctl_init(struct v4l2_int_device *s)
{
	return tcm825x_configure(s);
}
/* Nothing to tear down when the master detaches. */
static int ioctl_dev_exit(struct v4l2_int_device *s)
{
	return 0;
}
/*
 * Detect the chip: read register 0x01 over I2C and treat a zero value
 * (or an I2C failure) as "no device present".
 */
static int ioctl_dev_init(struct v4l2_int_device *s)
{
	struct tcm825x_sensor *sensor = s->priv;
	int r;
	r = tcm825x_read_reg(sensor->i2c_client, 0x01);
	if (r < 0)
		return r;
	if (r == 0) {
		dev_err(&sensor->i2c_client->dev, "device not detected\n");
		return -EIO;
	}
	return 0;
}
/* v4l2-int-device dispatch table wiring ioctl numbers to the handlers above. */
static struct v4l2_int_ioctl_desc tcm825x_ioctl_desc[] = {
	{ vidioc_int_dev_init_num,
	  (v4l2_int_ioctl_func *)ioctl_dev_init },
	{ vidioc_int_dev_exit_num,
	  (v4l2_int_ioctl_func *)ioctl_dev_exit },
	{ vidioc_int_s_power_num,
	  (v4l2_int_ioctl_func *)ioctl_s_power },
	{ vidioc_int_g_ifparm_num,
	  (v4l2_int_ioctl_func *)ioctl_g_ifparm },
	{ vidioc_int_g_needs_reset_num,
	  (v4l2_int_ioctl_func *)ioctl_g_needs_reset },
	{ vidioc_int_reset_num,
	  (v4l2_int_ioctl_func *)ioctl_reset },
	{ vidioc_int_init_num,
	  (v4l2_int_ioctl_func *)ioctl_init },
	{ vidioc_int_enum_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_enum_fmt_cap },
	{ vidioc_int_try_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_try_fmt_cap },
	{ vidioc_int_g_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_g_fmt_cap },
	{ vidioc_int_s_fmt_cap_num,
	  (v4l2_int_ioctl_func *)ioctl_s_fmt_cap },
	{ vidioc_int_g_parm_num,
	  (v4l2_int_ioctl_func *)ioctl_g_parm },
	{ vidioc_int_s_parm_num,
	  (v4l2_int_ioctl_func *)ioctl_s_parm },
	{ vidioc_int_queryctrl_num,
	  (v4l2_int_ioctl_func *)ioctl_queryctrl },
	{ vidioc_int_g_ctrl_num,
	  (v4l2_int_ioctl_func *)ioctl_g_ctrl },
	{ vidioc_int_s_ctrl_num,
	  (v4l2_int_ioctl_func *)ioctl_s_ctrl },
};
static struct v4l2_int_slave tcm825x_slave = {
	.ioctls = tcm825x_ioctl_desc,
	.num_ioctls = ARRAY_SIZE(tcm825x_ioctl_desc),
};
/* Single driver-wide sensor instance; initializer appears later in the file. */
static struct tcm825x_sensor tcm825x;
static struct v4l2_int_device tcm825x_int_device = {
	.module = THIS_MODULE,
	.name = TCM825X_NAME,
	.priv = &tcm825x,
	.type = v4l2_int_type_slave,
	.u = {
		.slave = &tcm825x_slave,
	},
};
/*
 * i2c probe hook: bind the (single, static) sensor instance to the i2c
 * client, validate the board-supplied platform data, set the default
 * capture format and register with v4l2-int-device.
 *
 * Returns 0 on success, -EBUSY if the client is already bound, -ENODEV
 * if platform data is missing or the board says the sensor is unusable.
 */
static int tcm825x_probe(struct i2c_client *client,
			 const struct i2c_device_id *did)
{
	struct tcm825x_sensor *sensor = &tcm825x;
	if (i2c_get_clientdata(client))
		return -EBUSY;
	sensor->platform_data = client->dev.platform_data;
	if (sensor->platform_data == NULL
	    || !sensor->platform_data->is_okay())
		return -ENODEV;
	sensor->v4l2_int_device = &tcm825x_int_device;
	sensor->i2c_client = client;
	i2c_set_clientdata(client, sensor);
	/* Make the default capture format QVGA RGB565 */
	sensor->pix.width = tcm825x_sizes[QVGA].width;
	sensor->pix.height = tcm825x_sizes[QVGA].height;
	sensor->pix.pixelformat = V4L2_PIX_FMT_RGB565;
	return v4l2_int_device_register(sensor->v4l2_int_device);
}
/*
 * i2c remove hook: unregister the v4l2-int-device registered at probe.
 * Per-device state lives in the static 'tcm825x' object, so nothing is
 * freed here.
 */
static int tcm825x_remove(struct i2c_client *client)
{
	struct tcm825x_sensor *sensor = i2c_get_clientdata(client);
	if (!client->adapter)
		return -ENODEV;	/* our client isn't attached */
	v4l2_int_device_unregister(sensor->v4l2_int_device);
	return 0;
}
/* I2C device id table; terminated by the empty entry. */
static const struct i2c_device_id tcm825x_id[] = {
	{ "tcm825x", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tcm825x_id);
static struct i2c_driver tcm825x_i2c_driver = {
	.driver = {
		.name = TCM825X_NAME,
	},
	.probe = tcm825x_probe,
	.remove = tcm825x_remove,
	.id_table = tcm825x_id,
};
/* Definition of the sensor instance declared earlier; default frame period. */
static struct tcm825x_sensor tcm825x = {
	.timeperframe = {
		.numerator = 1,
		.denominator = DEFAULT_FPS,
	},
};
/*
 * Module init: register the I2C driver.
 *
 * Returns 0 on success or the negative errno from i2c_add_driver().
 */
static int __init tcm825x_init(void)
{
	int rval;

	rval = i2c_add_driver(&tcm825x_i2c_driver);
	if (rval)
		/* A registration failure is an error, not informational. */
		printk(KERN_ERR "%s: failed registering " TCM825X_NAME "\n",
		       __func__);

	return rval;
}
/* Module exit: unregister the I2C driver (also unbinds any client). */
static void __exit tcm825x_exit(void)
{
	i2c_del_driver(&tcm825x_i2c_driver);
}
/*
 * FIXME: Menelaus isn't ready (?) at module_init stage, so use
 * late_initcall for now.
 */
late_initcall(tcm825x_init);
module_exit(tcm825x_exit);
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("TCM825x camera sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
fschaefer/android-samsung-3.0-jb | arch/arm/mach-s3c64xx/dev-audio.c | 2933 | 6768 | /* linux/arch/arm/plat-s3c/dev-audio.c
*
* Copyright 2009 Wolfson Microelectronics
* Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <mach/irqs.h>
#include <mach/map.h>
#include <mach/dma.h>
#include <plat/devs.h>
#include <plat/audio.h>
#include <plat/gpio-cfg.h>
/* Names of the root clock sources selectable for the I2S controllers. */
static const char *rclksrc[] = {
	[0] = "iis",
	[1] = "audio-bus",
};
/*
 * Route the GPIO pins of I2S controller @pdev->id to their I2S
 * special function. Returns 0 on success, -EINVAL for an unknown id.
 */
static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
{
	unsigned int base;

	switch (pdev->id) {
	case 2:
		/* The I2Sv4 pins are scattered; configure them one by one. */
		s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
		s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
		s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
		s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
		return 0;
	case 0:
		base = S3C64XX_GPD(0);
		break;
	case 1:
		base = S3C64XX_GPE(0);
		break;
	default:
		printk(KERN_DEBUG "Invalid I2S Controller number: %d\n",
			pdev->id);
		return -EINVAL;
	}

	/* Controllers 0/1 use five consecutive pins of one bank. */
	s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(3));

	return 0;
}
/* MMIO window plus TX/RX DMA channels for I2S controller 0. */
static struct resource s3c64xx_iis0_resource[] = {
	[0] = {
		.start = S3C64XX_PA_IIS0,
		.end = S3C64XX_PA_IIS0 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_I2S0_OUT,
		.end = DMACH_I2S0_OUT,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_I2S0_IN,
		.end = DMACH_I2S0_IN,
		.flags = IORESOURCE_DMA,
	},
};
/* Platform data shared by the two I2Sv3 controllers (0 and 1). */
static struct s3c_audio_pdata i2sv3_pdata = {
	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
	.type = {
		.i2s = {
			.src_clk = rclksrc,
		},
	},
};
struct platform_device s3c64xx_device_iis0 = {
	.name = "samsung-i2s",
	.id = 0,
	.num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
	.resource = s3c64xx_iis0_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_iis0);
/* MMIO window plus TX/RX DMA channels for I2S controller 1. */
static struct resource s3c64xx_iis1_resource[] = {
	[0] = {
		.start = S3C64XX_PA_IIS1,
		.end = S3C64XX_PA_IIS1 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_I2S1_OUT,
		.end = DMACH_I2S1_OUT,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_I2S1_IN,
		.end = DMACH_I2S1_IN,
		.flags = IORESOURCE_DMA,
	},
};
struct platform_device s3c64xx_device_iis1 = {
	.name = "samsung-i2s",
	.id = 1,
	.num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
	.resource = s3c64xx_iis1_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_iis1);
/* MMIO window plus TX/RX DMA channels for the I2Sv4 controller. */
static struct resource s3c64xx_iisv4_resource[] = {
	[0] = {
		.start = S3C64XX_PA_IISV4,
		.end = S3C64XX_PA_IISV4 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_HSI_I2SV40_TX,
		.end = DMACH_HSI_I2SV40_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_HSI_I2SV40_RX,
		.end = DMACH_HSI_I2SV40_RX,
		.flags = IORESOURCE_DMA,
	},
};
static struct s3c_audio_pdata i2sv4_pdata = {
	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
	.type = {
		.i2s = {
			.quirks = QUIRK_PRI_6CHAN,
			.src_clk = rclksrc,
		},
	},
};
struct platform_device s3c64xx_device_iisv4 = {
	.name = "samsung-i2s",
	.id = 2,
	.num_resources = ARRAY_SIZE(s3c64xx_iisv4_resource),
	.resource = s3c64xx_iisv4_resource,
	.dev = {
		.platform_data = &i2sv4_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_iisv4);
/* PCM Controller platform_devices */
/*
 * Route the GPIO pins of PCM controller @pdev->id to their PCM
 * special function. Returns 0 on success, -EINVAL for an unknown id.
 */
static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev)
{
	unsigned int base;

	if (pdev->id == 0) {
		base = S3C64XX_GPD(0);
	} else if (pdev->id == 1) {
		base = S3C64XX_GPE(0);
	} else {
		printk(KERN_DEBUG "Invalid PCM Controller number: %d\n",
			pdev->id);
		return -EINVAL;
	}

	s3c_gpio_cfgpin_range(base, 5, S3C_GPIO_SFN(2));

	return 0;
}
/* MMIO window plus TX/RX DMA channels for PCM controller 0. */
static struct resource s3c64xx_pcm0_resource[] = {
	[0] = {
		.start = S3C64XX_PA_PCM0,
		.end = S3C64XX_PA_PCM0 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_PCM0_TX,
		.end = DMACH_PCM0_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_PCM0_RX,
		.end = DMACH_PCM0_RX,
		.flags = IORESOURCE_DMA,
	},
};
static struct s3c_audio_pdata s3c_pcm0_pdata = {
	.cfg_gpio = s3c64xx_pcm_cfg_gpio,
};
struct platform_device s3c64xx_device_pcm0 = {
	.name = "samsung-pcm",
	.id = 0,
	.num_resources = ARRAY_SIZE(s3c64xx_pcm0_resource),
	.resource = s3c64xx_pcm0_resource,
	.dev = {
		.platform_data = &s3c_pcm0_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_pcm0);
/* MMIO window plus TX/RX DMA channels for PCM controller 1. */
static struct resource s3c64xx_pcm1_resource[] = {
	[0] = {
		.start = S3C64XX_PA_PCM1,
		.end = S3C64XX_PA_PCM1 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_PCM1_TX,
		.end = DMACH_PCM1_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_PCM1_RX,
		.end = DMACH_PCM1_RX,
		.flags = IORESOURCE_DMA,
	},
};
static struct s3c_audio_pdata s3c_pcm1_pdata = {
	.cfg_gpio = s3c64xx_pcm_cfg_gpio,
};
struct platform_device s3c64xx_device_pcm1 = {
	.name = "samsung-pcm",
	.id = 1,
	.num_resources = ARRAY_SIZE(s3c64xx_pcm1_resource),
	.resource = s3c64xx_pcm1_resource,
	.dev = {
		.platform_data = &s3c_pcm1_pdata,
	},
};
EXPORT_SYMBOL(s3c64xx_device_pcm1);
/* AC97 Controller platform devices */
/* Pin setup when the AC97 lines are routed through GPIO bank D. */
static int s3c64xx_ac97_cfg_gpd(struct platform_device *pdev)
{
	return s3c_gpio_cfgpin_range(S3C64XX_GPD(0), 5, S3C_GPIO_SFN(4));
}
/* Pin setup when the AC97 lines are routed through GPIO bank E. */
static int s3c64xx_ac97_cfg_gpe(struct platform_device *pdev)
{
	return s3c_gpio_cfgpin_range(S3C64XX_GPE(0), 5, S3C_GPIO_SFN(4));
}
/* MMIO window, three DMA channels and the IRQ of the AC97 controller. */
static struct resource s3c64xx_ac97_resource[] = {
	[0] = {
		.start = S3C64XX_PA_AC97,
		.end = S3C64XX_PA_AC97 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_AC97_PCMOUT,
		.end = DMACH_AC97_PCMOUT,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_AC97_PCMIN,
		.end = DMACH_AC97_PCMIN,
		.flags = IORESOURCE_DMA,
	},
	[3] = {
		.start = DMACH_AC97_MICIN,
		.end = DMACH_AC97_MICIN,
		.flags = IORESOURCE_DMA,
	},
	[4] = {
		.start = IRQ_AC97,
		.end = IRQ_AC97,
		.flags = IORESOURCE_IRQ,
	},
};
/* cfg_gpio is filled in at runtime by s3c64xx_ac97_setup_gpio(). */
static struct s3c_audio_pdata s3c_ac97_pdata;
static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
struct platform_device s3c64xx_device_ac97 = {
	.name = "samsung-ac97",
	.id = -1,
	.num_resources = ARRAY_SIZE(s3c64xx_ac97_resource),
	.resource = s3c64xx_ac97_resource,
	.dev = {
		.platform_data = &s3c_ac97_pdata,
		.dma_mask = &s3c64xx_ac97_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
EXPORT_SYMBOL(s3c64xx_device_ac97);
/* Select which GPIO bank (GPD or GPE) carries the AC97 signals. */
void __init s3c64xx_ac97_setup_gpio(int num)
{
	s3c_ac97_pdata.cfg_gpio = (num == S3C64XX_AC97_GPD) ?
		s3c64xx_ac97_cfg_gpd : s3c64xx_ac97_cfg_gpe;
}
| gpl-2.0 |
TRKP/android_kernel_samsung_i9300 | arch/arm/mach-omap1/io.c | 2933 | 4180 | /*
* linux/arch/arm/mach-omap1/io.c
*
* OMAP1 I/O mapping code
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/tlb.h>
#include <asm/mach/map.h>
#include <plat/mux.h>
#include <plat/tc.h>
#include "clock.h"
extern void omap_check_revision(void);
extern void omap_sram_init(void);
/*
 * The machine specific code may provide the extra mapping besides the
 * default mapping provided here.
 */
static struct map_desc omap_io_desc[] __initdata = {
	{
		.virtual = OMAP1_IO_VIRT,
		.pfn = __phys_to_pfn(OMAP1_IO_PHYS),
		.length = OMAP1_IO_SIZE,
		.type = MT_DEVICE
	}
};
/* DSP and DSP-register mappings, per OMAP variant. */
#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
static struct map_desc omap7xx_io_desc[] __initdata = {
	{
		.virtual = OMAP7XX_DSP_BASE,
		.pfn = __phys_to_pfn(OMAP7XX_DSP_START),
		.length = OMAP7XX_DSP_SIZE,
		.type = MT_DEVICE
	}, {
		.virtual = OMAP7XX_DSPREG_BASE,
		.pfn = __phys_to_pfn(OMAP7XX_DSPREG_START),
		.length = OMAP7XX_DSPREG_SIZE,
		.type = MT_DEVICE
	}
};
#endif
#ifdef CONFIG_ARCH_OMAP15XX
static struct map_desc omap1510_io_desc[] __initdata = {
	{
		.virtual = OMAP1510_DSP_BASE,
		.pfn = __phys_to_pfn(OMAP1510_DSP_START),
		.length = OMAP1510_DSP_SIZE,
		.type = MT_DEVICE
	}, {
		.virtual = OMAP1510_DSPREG_BASE,
		.pfn = __phys_to_pfn(OMAP1510_DSPREG_START),
		.length = OMAP1510_DSPREG_SIZE,
		.type = MT_DEVICE
	}
};
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
static struct map_desc omap16xx_io_desc[] __initdata = {
	{
		.virtual = OMAP16XX_DSP_BASE,
		.pfn = __phys_to_pfn(OMAP16XX_DSP_START),
		.length = OMAP16XX_DSP_SIZE,
		.type = MT_DEVICE
	}, {
		.virtual = OMAP16XX_DSPREG_BASE,
		.pfn = __phys_to_pfn(OMAP16XX_DSPREG_START),
		.length = OMAP16XX_DSPREG_SIZE,
		.type = MT_DEVICE
	}
};
#endif
/*
 * Maps common IO regions for omap1. This should only get called from
 * board specific init.
 */
void __init omap1_map_common_io(void)
{
	iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));
	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();
	/* We want to check CPU revision early for cpu_is_omapxxxx() macros.
	 * IO space mapping must be initialized before we can do that.
	 */
	omap_check_revision();
	/* Add the variant-specific DSP mappings now that we know the CPU. */
#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
	if (cpu_is_omap7xx()) {
		iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc));
	}
#endif
#ifdef CONFIG_ARCH_OMAP15XX
	if (cpu_is_omap15xx()) {
		iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
	}
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
	if (cpu_is_omap16xx()) {
		iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
	}
#endif
	/* SRAM setup runs last, after the static mappings are in place. */
	omap_sram_init();
}
/*
 * Common low-level hardware init for omap1. This should only get called from
 * board specific init.
 */
void __init omap1_init_common_hw(void)
{
	/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
	 * on a Posted Write in the TIPB Bridge".
	 */
	omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL);
	omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL);
	/* Must init clocks early to assure that timer interrupt works
	 */
	omap1_clk_init();
	omap1_mux_init();
}
/*
 * NOTE: Please use ioremap + __raw_read/write where possible instead of these
 */
/*
 * Legacy physical-address register accessors: each translates the
 * physical address with OMAP1_IO_ADDRESS() and performs a raw MMIO
 * access of the matching width.
 */
u8 omap_readb(u32 pa)
{
	return __raw_readb(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readb);
u16 omap_readw(u32 pa)
{
	return __raw_readw(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readw);
u32 omap_readl(u32 pa)
{
	return __raw_readl(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readl);
void omap_writeb(u8 v, u32 pa)
{
	__raw_writeb(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writeb);
void omap_writew(u16 v, u32 pa)
{
	__raw_writew(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writew);
void omap_writel(u32 v, u32 pa)
{
	__raw_writel(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writel);
| gpl-2.0 |
agat63/AGAT_JB_kernel | net/wimax/stack.c | 3957 | 18205 | /*
* Linux WiMAX
* Initialization, addition and removal of wimax devices
*
*
* Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* This implements:
*
* - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on
* addition/registration initialize all subfields and allocate
* generic netlink resources for user space communication. On
* removal/unregistration, undo all that.
*
* - device state machine [wimax_state_change()] and support to send
* reports to user space when the state changes
* [wimax_gnl_re_state_change*()].
*
* See include/net/wimax.h for rationales and design.
*
* ROADMAP
*
* [__]wimax_state_change() Called by drivers to update device's state
* wimax_gnl_re_state_change_alloc()
* wimax_gnl_re_state_change_send()
*
* wimax_dev_init() Init a device
* wimax_dev_add() Register
* wimax_rfkill_add()
* wimax_gnl_add() Register all the generic netlink resources.
* wimax_id_table_add()
* wimax_dev_rm() Unregister
* wimax_id_table_rm()
* wimax_gnl_rm()
* wimax_rfkill_rm()
*/
#include <linux/device.h>
#include <linux/gfp.h>
#include <net/genetlink.h>
#include <linux/netdevice.h>
#include <linux/wimax.h>
#include "wimax-internal.h"
#define D_SUBMODULE stack
#include "debug-levels.h"
/* Debug-level configuration string, parsed from the "debug" module param. */
static char wimax_debug_params[128];
module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params),
		    0644);
MODULE_PARM_DESC(debug,
		 "String of space-separated NAME:VALUE pairs, where NAMEs "
		 "are the different debug submodules and VALUE are the "
		 "initial debug value to set.");
/*
* Authoritative source for the RE_STATE_CHANGE attribute policy
*
* We don't really use it here, but /me likes to keep the definition
* close to where the data is generated.
*/
/*
static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
[WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
};
*/
/*
 * Allocate a Report State Change message
 *
 * @header: save it, you need it for _send()
 *
 * Creates and fills a basic state change message; different code
 * paths can then add more attributes to the message as needed.
 *
 * Use wimax_gnl_re_state_change_send() to send the returned skb.
 *
 * Returns: skb with the genl message if ok, IS_ERR() ptr on error
 * with an errno code.
 */
static
struct sk_buff *wimax_gnl_re_state_change_alloc(
	struct wimax_dev *wimax_dev,
	enum wimax_st new_state, enum wimax_st old_state,
	void **header)
{
	int result;
	struct device *dev = wimax_dev_to_dev(wimax_dev);
	void *data;
	struct sk_buff *report_skb;
	d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n",
		  wimax_dev, new_state, old_state);
	result = -ENOMEM;
	report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (report_skb == NULL) {
		dev_err(dev, "RE_STCH: can't create message\n");
		goto error_new;
	}
	/* Genl header: RE_STATE_CHANGE command on the wimax family. */
	data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family,
			   0, WIMAX_GNL_RE_STATE_CHANGE);
	if (data == NULL) {
		dev_err(dev, "RE_STCH: can't put data into message\n");
		goto error_put;
	}
	*header = data;
	/* Attributes: old state, new state, originating interface index. */
	result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state);
	if (result < 0) {
		dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result);
		goto error_put;
	}
	result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state);
	if (result < 0) {
		dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result);
		goto error_put;
	}
	result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX,
			     wimax_dev->net_dev->ifindex);
	if (result < 0) {
		dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n");
		goto error_put;
	}
	d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n",
		wimax_dev, new_state, old_state, report_skb);
	return report_skb;
error_put:
	nlmsg_free(report_skb);
error_new:
	d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n",
		wimax_dev, new_state, old_state, result);
	return ERR_PTR(result);
}
/*
 * Send a Report State Change message (as created with _alloc).
 *
 * @report_skb: as returned by wimax_gnl_re_state_change_alloc()
 * @header: as returned by wimax_gnl_re_state_change_alloc()
 *
 * Returns: 0 if ok, < 0 errno code on error.
 *
 * If the message is NULL, pretend it didn't happen.
 */
static
int wimax_gnl_re_state_change_send(
	struct wimax_dev *wimax_dev, struct sk_buff *report_skb,
	void *header)
{
	int result = 0;
	struct device *dev = wimax_dev_to_dev(wimax_dev);
	d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
		  wimax_dev, report_skb);
	if (report_skb == NULL) {
		result = -ENOMEM;
		goto out;
	}
	/* Close the genl message and broadcast it to the wimax mc group. */
	genlmsg_end(report_skb, header);
	genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
out:
	d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
		wimax_dev, report_skb, result);
	return result;
}
/*
 * Complain loudly (WARN_ON backtrace + error message) if @new_state is
 * not in the bitmap of transitions allowed from @old_state.
 */
static
void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
		       unsigned allowed_states_bm)
{
	unsigned new_state_bit = 1 << new_state;

	if (WARN_ON((new_state_bit & allowed_states_bm) == 0))
		printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
		       old_state, new_state);
}
/*
* Set the current state of a WiMAX device [unlocking version of
* wimax_state_change().
*/
/*
 * Core of the state machine: validates the old_state -> new_state
 * transition, runs exit actions for the old state and entry actions
 * for the new one, commits the new state and multicasts a report to
 * user space.  Caller must hold wimax_dev->mutex (see the kernel-doc
 * of wimax_state_change() below).
 */
void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
{
	struct device *dev = wimax_dev_to_dev(wimax_dev);
	enum wimax_st old_state = wimax_dev->state;
	struct sk_buff *stch_skb;
	void *header;

	d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
		  wimax_dev, new_state, old_state);

	if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
		dev_err(dev, "SW BUG: requesting invalid state %u\n",
			new_state);
		goto out;
	}
	/* Same state: nothing to do, no report. */
	if (old_state == new_state)
		goto out;
	/* Allocate the report up front; a failed allocation (IS_ERR)
	 * only suppresses the user-space notification at the end. */
	header = NULL;	/* gcc complains? can't grok why */
	stch_skb = wimax_gnl_re_state_change_alloc(
		wimax_dev, new_state, old_state, &header);

	/* Verify the state transition and do exit-from-state actions */
	switch (old_state) {
	case __WIMAX_ST_NULL:
		__check_new_state(old_state, new_state,
				  1 << WIMAX_ST_DOWN);
		break;
	case WIMAX_ST_DOWN:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_UNINITIALIZED
				  | 1 << WIMAX_ST_RADIO_OFF);
		break;
	case __WIMAX_ST_QUIESCING:
		__check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
		break;
	case WIMAX_ST_UNINITIALIZED:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF);
		break;
	case WIMAX_ST_RADIO_OFF:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_READY);
		break;
	case WIMAX_ST_READY:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_SCANNING
				  | 1 << WIMAX_ST_CONNECTING
				  | 1 << WIMAX_ST_CONNECTED);
		break;
	case WIMAX_ST_SCANNING:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_READY
				  | 1 << WIMAX_ST_CONNECTING
				  | 1 << WIMAX_ST_CONNECTED);
		break;
	case WIMAX_ST_CONNECTING:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_READY
				  | 1 << WIMAX_ST_SCANNING
				  | 1 << WIMAX_ST_CONNECTED);
		break;
	case WIMAX_ST_CONNECTED:
		__check_new_state(old_state, new_state,
				  1 << __WIMAX_ST_QUIESCING
				  | 1 << WIMAX_ST_RADIO_OFF
				  | 1 << WIMAX_ST_READY);
		/* Leaving CONNECTED: stop TX and drop carrier. */
		netif_tx_disable(wimax_dev->net_dev);
		netif_carrier_off(wimax_dev->net_dev);
		break;
	case __WIMAX_ST_INVALID:
	default:
		dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
			wimax_dev, wimax_dev->state);
		WARN_ON(1);
		goto out;
	}

	/* Execute the actions of entry to the new state */
	switch (new_state) {
	case __WIMAX_ST_NULL:
		dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
			"from %u\n", wimax_dev, wimax_dev->state);
		WARN_ON(1);		/* Nobody can enter this state */
		break;
	case WIMAX_ST_DOWN:
		break;
	case __WIMAX_ST_QUIESCING:
		break;
	case WIMAX_ST_UNINITIALIZED:
		break;
	case WIMAX_ST_RADIO_OFF:
		break;
	case WIMAX_ST_READY:
		break;
	case WIMAX_ST_SCANNING:
		break;
	case WIMAX_ST_CONNECTING:
		break;
	case WIMAX_ST_CONNECTED:
		/* Entering CONNECTED: announce carrier, restart TX. */
		netif_carrier_on(wimax_dev->net_dev);
		netif_wake_queue(wimax_dev->net_dev);
		break;
	case __WIMAX_ST_INVALID:
	default:
		BUG();
	}
	__wimax_state_set(wimax_dev, new_state);
	/* Only report to user space if the skb allocation succeeded. */
	if (!IS_ERR(stch_skb))
		wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
out:
	d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
		wimax_dev, new_state, old_state);
}
/**
* wimax_state_change - Set the current state of a WiMAX device
*
* @wimax_dev: WiMAX device descriptor (properly referenced)
* @new_state: New state to switch to
*
* This implements the state changes for the wimax devices. It will
*
* - verify that the state transition is legal (for now it'll just
* print a warning if not) according to the table in
* linux/wimax.h's documentation for 'enum wimax_st'.
*
* - perform the actions needed for leaving the current state and
* whichever are needed for entering the new state.
*
* - issue a report to user space indicating the new state (and an
* optional payload with information about the new state).
*
* NOTE: @wimax_dev must be locked
*/
void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
{
	/*
	 * A driver cannot take the wimax_dev out of the
	 * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If
	 * the wimax_dev's state is still NULL, we ignore any request
	 * to change its state because it means it hasn't been yet
	 * registered.
	 *
	 * There is no need to complain about it, as routines that
	 * call this might be shared from different code paths that
	 * are called before or after wimax_dev_add() has done its
	 * job.
	 */
	mutex_lock(&wimax_dev->mutex);
	/* Silently ignored while still in __WIMAX_ST_NULL (see above). */
	if (wimax_dev->state > __WIMAX_ST_NULL)
		__wimax_state_change(wimax_dev, new_state);
	mutex_unlock(&wimax_dev->mutex);
}
EXPORT_SYMBOL_GPL(wimax_state_change);
/**
* wimax_state_get() - Return the current state of a WiMAX device
*
* @wimax_dev: WiMAX device descriptor
*
* Returns: Current state of the device according to its driver.
*/
enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
{
enum wimax_st state;
mutex_lock(&wimax_dev->mutex);
state = wimax_dev->state;
mutex_unlock(&wimax_dev->mutex);
return state;
}
EXPORT_SYMBOL_GPL(wimax_state_get);
/**
* wimax_dev_init - initialize a newly allocated instance
*
* @wimax_dev: WiMAX device descriptor to initialize.
*
* Initializes fields of a freshly allocated @wimax_dev instance. This
* function assumes that after allocation, the memory occupied by
* @wimax_dev was zeroed.
*/
void wimax_dev_init(struct wimax_dev *wimax_dev)
{
	/* Caller guarantees *wimax_dev is zero-filled; we only set up
	 * the fields that need non-zero initial values. */
	mutex_init(&wimax_dev->mutex);
	mutex_init(&wimax_dev->mutex_reset);
	INIT_LIST_HEAD(&wimax_dev->id_table_node);
	__wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
}
EXPORT_SYMBOL_GPL(wimax_dev_init);
/*
* This extern is declared here because it's easier to keep track --
* both declarations are a list of the same
*/
/* Generic netlink operations, each defined in its own op-*.c file. */
extern struct genl_ops
	wimax_gnl_msg_from_user,
	wimax_gnl_reset,
	wimax_gnl_rfkill,
	wimax_gnl_state_get;

/* Registration table walked by wimax_subsys_init()/_exit(). */
static
struct genl_ops *wimax_gnl_ops[] = {
	&wimax_gnl_msg_from_user,
	&wimax_gnl_reset,
	&wimax_gnl_rfkill,
	&wimax_gnl_state_get,
};
/*
 * Render a hardware address as colon-separated hex into @addr_str.
 * The final iteration prints '\0' through %c instead of ':', which
 * terminates the string; the returned count includes that NUL, exactly
 * as the original scnprintf() accounting does.
 */
static
size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
			   unsigned char *addr, size_t addr_len)
{
	unsigned idx, written = 0;

	for (idx = 0; idx < addr_len; idx++)
		written += scnprintf(addr_str + written,
				     addr_str_size - written, "%02x%c",
				     addr[idx],
				     idx == addr_len - 1 ? '\0' : ':');
	return written;
}
/**
* wimax_dev_add - Register a new WiMAX device
*
* @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's
* priv data). You must have called wimax_dev_init() on it before.
*
* @net_dev: net device the @wimax_dev is associated with. The
* function expects SET_NETDEV_DEV() and register_netdev() were
* already called on it.
*
* Registers the new WiMAX device, sets up the user-kernel control
* interface (generic netlink) and common WiMAX infrastructure.
*
* Note that the parts that will allow interaction with user space are
* setup at the very end, when the rest is in place, as once that
* happens, the driver might get user space control requests via
* netlink or from debugfs that might translate into calls into
* wimax_dev->op_*().
*/
/*
 * See the kernel-doc above.  Order matters: rfkill first (it calls
 * back into us before we lock), then the id table and debugfs under
 * the device lock, and only then does the state move to DOWN so user
 * space can start talking to the device.
 */
int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
{
	int result;
	struct device *dev = net_dev->dev.parent;
	char addr_str[32];

	d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);

	/* Do the RFKILL setup before locking, as RFKILL will call
	 * into our functions. */
	wimax_dev->net_dev = net_dev;
	result = wimax_rfkill_add(wimax_dev);
	if (result < 0)
		goto error_rfkill_add;

	/* Set up user-space interaction */
	mutex_lock(&wimax_dev->mutex);
	wimax_id_table_add(wimax_dev);
	result = wimax_debugfs_add(wimax_dev);
	if (result < 0) {
		dev_err(dev, "cannot initialize debugfs: %d\n",
			result);
		goto error_debugfs_add;
	}

	/* Device becomes usable (DOWN) only once everything is set up. */
	__wimax_state_set(wimax_dev, WIMAX_ST_DOWN);
	mutex_unlock(&wimax_dev->mutex);

	wimax_addr_scnprint(addr_str, sizeof(addr_str),
			    net_dev->dev_addr, net_dev->addr_len);
	/* NOTE(review): dev_err() is used for a success message here --
	 * looks like it should be dev_info(); confirm before changing. */
	dev_err(dev, "WiMAX interface %s (%s) ready\n",
		net_dev->name, addr_str);
	d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev);
	return 0;

error_debugfs_add:
	/* Unwind in reverse order of setup. */
	wimax_id_table_rm(wimax_dev);
	mutex_unlock(&wimax_dev->mutex);
	wimax_rfkill_rm(wimax_dev);
error_rfkill_add:
	d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n",
		wimax_dev, net_dev, result);
	return result;
}
EXPORT_SYMBOL_GPL(wimax_dev_add);
/**
* wimax_dev_rm - Unregister an existing WiMAX device
*
* @wimax_dev: WiMAX device descriptor
*
* Unregisters a WiMAX device previously registered for use with
* wimax_add_rm().
*
* IMPORTANT! Must call before calling unregister_netdev().
*
* After this function returns, you will not get any more user space
* control requests (via netlink or debugfs) and thus to wimax_dev->ops.
*
* Reentrancy control is ensured by setting the state to
* %__WIMAX_ST_QUIESCING. rfkill operations coming through
* wimax_*rfkill*() will be stopped by the quiescing state; ops coming
* from the rfkill subsystem will be stopped by the support being
* removed by wimax_rfkill_rm().
*/
void wimax_dev_rm(struct wimax_dev *wimax_dev)
{
	d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);

	mutex_lock(&wimax_dev->mutex);
	/* QUIESCING blocks further operations while the user-space
	 * interfaces are being torn down (see kernel-doc above)... */
	__wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
	wimax_debugfs_rm(wimax_dev);
	wimax_id_table_rm(wimax_dev);
	/* ...then the device settles back in DOWN. */
	__wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
	mutex_unlock(&wimax_dev->mutex);
	/* rfkill is removed outside the lock, mirroring wimax_dev_add(). */
	wimax_rfkill_rm(wimax_dev);

	d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
}
EXPORT_SYMBOL_GPL(wimax_dev_rm);
/* Debug framework control of debug levels */
/* One debug level per submodule, tunable via the wimax.debug
 * parameter parsed in wimax_subsys_init(). */
struct d_level D_LEVEL[] = {
	D_SUBMODULE_DEFINE(debugfs),
	D_SUBMODULE_DEFINE(id_table),
	D_SUBMODULE_DEFINE(op_msg),
	D_SUBMODULE_DEFINE(op_reset),
	D_SUBMODULE_DEFINE(op_rfkill),
	D_SUBMODULE_DEFINE(op_state_get),
	D_SUBMODULE_DEFINE(stack),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);

/* The generic netlink family all WiMAX ops/reports are attached to. */
struct genl_family wimax_gnl_family = {
	.id = GENL_ID_GENERATE,
	.name = "WiMAX",
	.version = WIMAX_GNL_VERSION,
	.hdrsize = 0,
	.maxattr = WIMAX_GNL_ATTR_MAX,
};

/* Multicast group used for unsolicited reports (state changes etc). */
struct genl_multicast_group wimax_gnl_mcg = {
	.name = "msg",
};
/* Setup the wimax stack */
/*
 * Bring up the WiMAX stack: parse debug levels, register the generic
 * netlink family, then each op, then the report multicast group.
 * Failures unwind in reverse registration order.
 */
static
int __init wimax_subsys_init(void)
{
	int result, cnt;

	d_fnstart(4, NULL, "()\n");
	d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
		       "wimax.debug");

	/* NOTE(review): wimax_gnl_family.name is already statically
	 * initialized to "WiMAX"; this snprintf rewrites the same
	 * value.  Presumably historical -- confirm before removing. */
	snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
		 "WiMAX");
	result = genl_register_family(&wimax_gnl_family);
	if (unlikely(result < 0)) {
		printk(KERN_ERR "cannot register generic netlink family: %d\n",
		       result);
		goto error_register_family;
	}

	for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) {
		result = genl_register_ops(&wimax_gnl_family,
					   wimax_gnl_ops[cnt]);
		d_printf(4, NULL, "registering generic netlink op code "
			 "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result);
		if (unlikely(result < 0)) {
			printk(KERN_ERR "cannot register generic netlink op "
			       "code %u: %d\n",
			       wimax_gnl_ops[cnt]->cmd, result);
			goto error_register_ops;
		}
	}

	result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
	if (result < 0)
		goto error_mc_group;
	d_fnend(4, NULL, "() = 0\n");
	return 0;

	/* On mc-group failure cnt == ARRAY_SIZE so the loop below
	 * unregisters every op; on op failure it unregisters only the
	 * ones registered so far (cnt-- skips the failed one). */
error_mc_group:
error_register_ops:
	for (cnt--; cnt >= 0; cnt--)
		genl_unregister_ops(&wimax_gnl_family,
				    wimax_gnl_ops[cnt]);
	genl_unregister_family(&wimax_gnl_family);
error_register_family:
	d_fnend(4, NULL, "() = %d\n", result);
	return result;
}
module_init(wimax_subsys_init);
/* Shutdown the wimax stack */
/*
 * Tear the stack down in the opposite order of wimax_subsys_init():
 * id table, multicast group, ops (last registered first), family.
 */
static
void __exit wimax_subsys_exit(void)
{
	int i;

	wimax_id_table_release();
	genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
	for (i = ARRAY_SIZE(wimax_gnl_ops) - 1; i >= 0; i--)
		genl_unregister_ops(&wimax_gnl_family, wimax_gnl_ops[i]);
	genl_unregister_family(&wimax_gnl_family);
}
module_exit(wimax_subsys_exit);
MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Linux WiMAX stack");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jamison904/kernel_jflte_tw | arch/m68k/platform/coldfire/timers.c | 4469 | 5415 | /***************************************************************************/
/*
* timers.c -- generic ColdFire hardware timer support.
*
* Copyright (C) 1999-2008, Greg Ungerer <gerg@snapgear.com>
*/
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <asm/io.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcftimer.h>
#include <asm/mcfsim.h>
/***************************************************************************/
/*
* By default use timer1 as the system clock timer.
*/
#define FREQ (MCF_BUSCLK / 16)
#define TA(a) (MCFTIMER_BASE1 + (a))
/*
* These provide the underlying interrupt vector support.
* Unfortunately it is a little different on each ColdFire.
*/
void coldfire_profile_init(void);
#if defined(CONFIG_M532x)
#define __raw_readtrr __raw_readl
#define __raw_writetrr __raw_writel
#else
#define __raw_readtrr __raw_readw
#define __raw_writetrr __raw_writew
#endif
static u32 mcftmr_cycles_per_jiffy;
static u32 mcftmr_cnt;
static irq_handler_t timer_interrupt;
/***************************************************************************/
/*
 * Route the timer interrupts on parts that use the MBAR-mapped SIM
 * interrupt controller (MCFSIM_ICR_AUTOVEC defined); a no-op on the
 * other ColdFire variants.
 */
static void init_timer_irq(void)
{
#ifdef MCFSIM_ICR_AUTOVEC
	/* Timer1 is always used as system timer */
	writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
		MCF_MBAR + MCFSIM_TIMER1ICR);
	mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);

#ifdef CONFIG_HIGHPROFILE
	/* Timer2 is to be used as a high speed profile timer  */
	writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
		MCF_MBAR + MCFSIM_TIMER2ICR);
	mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
#endif
#endif /* MCFSIM_ICR_AUTOVEC */
}
/***************************************************************************/
/*
 * System tick: acknowledge timer1's event, account a jiffy's worth of
 * cycles for the clocksource, then chain to the generic tick handler
 * installed by hw_timer_init().
 */
static irqreturn_t mcftmr_tick(int irq, void *dummy)
{
	/* Reset the ColdFire timer */
	__raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER));

	mcftmr_cnt += mcftmr_cycles_per_jiffy;
	return timer_interrupt(irq, dummy);
}

/***************************************************************************/

static struct irqaction mcftmr_timer_irq = {
	.name	 = "timer",
	.flags	 = IRQF_DISABLED | IRQF_TIMER,
	.handler = mcftmr_tick,
};
/***************************************************************************/
/*
 * Clocksource read: combine the software cycle accumulator with the
 * timer's live counter.  IRQs are disabled so the two reads form a
 * consistent pair (the tick handler cannot bump mcftmr_cnt between
 * them on this CPU).
 */
static cycle_t mcftmr_read_clk(struct clocksource *cs)
{
	unsigned long flags;
	u32 cycles;
	u16 tcn;

	local_irq_save(flags);
	tcn = __raw_readw(TA(MCFTIMER_TCN));
	cycles = mcftmr_cnt;
	local_irq_restore(flags);

	return cycles + tcn;
}

/***************************************************************************/

static struct clocksource mcftmr_clk = {
	.name	= "tmr",
	.rating	= 250,
	.read	= mcftmr_read_clk,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
/***************************************************************************/
/*
 * Program timer1 as the periodic system tick and register it as a
 * clocksource.  @handler is stored in timer_interrupt and invoked on
 * every tick from mcftmr_tick().
 */
void hw_timer_init(irq_handler_t handler)
{
	/* Stop the timer before reprogramming it. */
	__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
	mcftmr_cycles_per_jiffy = FREQ / HZ;
	/*
	 * The coldfire timer runs from 0 to TRR included, then 0
	 * again and so on.  It counts thus actually TRR + 1 steps
	 * for 1 tick, not TRR.  So if you want n cycles,
	 * initialize TRR with n - 1.
	 */
	__raw_writetrr(mcftmr_cycles_per_jiffy - 1, TA(MCFTIMER_TRR));
	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));

	clocksource_register_hz(&mcftmr_clk, FREQ);

	timer_interrupt = handler;
	init_timer_irq();
	setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq);

#ifdef CONFIG_HIGHPROFILE
	coldfire_profile_init();
#endif
}
/***************************************************************************/
#ifdef CONFIG_HIGHPROFILE
/***************************************************************************/
/*
* By default use timer2 as the profiler clock timer.
*/
#define PA(a) (MCFTIMER_BASE2 + (a))
/*
* Choose a reasonably fast profile timer. Make it an odd value to
* try and get good coverage of kernel operations.
*/
#define PROFILEHZ 1013
/*
* Use the other timer to provide high accuracy profiling info.
*/
/* Timer2 interrupt: acknowledge the event and feed the kernel
 * profiler.  The idle task (pid 0) is not profiled. */
irqreturn_t coldfire_profile_tick(int irq, void *dummy)
{
	/* Reset ColdFire timer2 */
	__raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, PA(MCFTIMER_TER));
	if (current->pid)
		profile_tick(CPU_PROFILING);
	return IRQ_HANDLED;
}

/***************************************************************************/

static struct irqaction coldfire_profile_irq = {
	.name	 = "profile timer",
	.flags	 = IRQF_DISABLED | IRQF_TIMER,
	.handler = coldfire_profile_tick,
};

/* Program timer2 as a free-running PROFILEHZ profiling clock.
 * NOTE(review): unlike hw_timer_init(), TRR is not loaded with n - 1
 * here, so the period is one timer step longer than the nominal
 * (MCF_BUSCLK/16)/PROFILEHZ -- presumably close enough for profiling;
 * confirm before "fixing". */
void coldfire_profile_init(void)
{
	printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n",
	       PROFILEHZ);

	/* Set up TIMER 2 as high speed profile clock */
	__raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));

	__raw_writetrr(((MCF_BUSCLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));

	setup_irq(MCF_IRQ_PROFILER, &coldfire_profile_irq);
}
/***************************************************************************/
#endif /* CONFIG_HIGHPROFILE */
/***************************************************************************/
| gpl-2.0 |
MoKee/android_kernel_huawei_msm8928 | drivers/input/misc/88pm860x_onkey.c | 4981 | 4532 | /*
* 88pm860x_onkey.c - Marvell 88PM860x ONKEY driver
*
* Copyright (C) 2009-2010 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
#define PM8607_WAKEUP 0x0b
#define LONG_ONKEY_EN (1 << 1)
#define ONKEY_STATUS (1 << 0)
/* Per-device state for the 88PM860x ONKEY (power button) driver. */
struct pm860x_onkey_info {
	struct input_dev	*idev;	/* input device reporting KEY_POWER */
	struct pm860x_chip	*chip;	/* parent PMIC */
	struct i2c_client	*i2c;	/* client used for register access */
	struct device		*dev;	/* our platform device's dev */
	int			irq;	/* ONKEY interrupt */
};
/* 88PM860x gives us an interrupt when ONKEY is held */
static irqreturn_t pm860x_onkey_handler(int irq, void *data)
{
	struct pm860x_onkey_info *info = data;
	int key_state;

	/* Bit 0 of STATUS_2 reflects the current ONKEY level; report
	 * it as the KEY_POWER press/release state. */
	key_state = pm860x_reg_read(info->i2c, PM8607_STATUS_2) & ONKEY_STATUS;
	input_report_key(info->idev, KEY_POWER, key_state);
	input_sync(info->idev);

	/* Enable 8-second long onkey detection */
	pm860x_set_bits(info->i2c, PM8607_WAKEUP, 3, LONG_ONKEY_EN);

	return IRQ_HANDLED;
}
/*
 * Probe: allocate driver state, register an input device reporting
 * KEY_POWER and hook the ONKEY interrupt as a threaded handler.
 * Error paths unwind in reverse order of setup.
 */
static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
{
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm860x_onkey_info *info;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "No IRQ resource!\n");
		return -EINVAL;
	}

	info = kzalloc(sizeof(struct pm860x_onkey_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->chip = chip;
	/* On the 8607 the registers are reached through the main
	 * client; other variants go through the companion client. */
	info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
	info->dev = &pdev->dev;
	info->irq = irq;

	info->idev = input_allocate_device();
	if (!info->idev) {
		dev_err(chip->dev, "Failed to allocate input dev\n");
		ret = -ENOMEM;
		goto out;
	}

	info->idev->name = "88pm860x_on";
	info->idev->phys = "88pm860x_on/input0";
	info->idev->id.bustype = BUS_I2C;
	info->idev->dev.parent = &pdev->dev;
	/* Only one event: the power key. */
	info->idev->evbit[0] = BIT_MASK(EV_KEY);
	info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);

	ret = input_register_device(info->idev);
	if (ret) {
		dev_err(chip->dev, "Can't register input device: %d\n", ret);
		goto out_reg;
	}

	ret = request_threaded_irq(info->irq, NULL, pm860x_onkey_handler,
				   IRQF_ONESHOT, "onkey", info);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
			info->irq, ret);
		goto out_irq;
	}

	platform_set_drvdata(pdev, info);
	device_init_wakeup(&pdev->dev, 1);

	return 0;

out_irq:
	/* A registered input device is released by unregistering it,
	 * not by input_free_device(). */
	input_unregister_device(info->idev);
	kfree(info);
	return ret;

out_reg:
	input_free_device(info->idev);
out:
	kfree(info);
	return ret;
}
static int __devexit pm860x_onkey_remove(struct platform_device *pdev)
{
	struct pm860x_onkey_info *onkey = platform_get_drvdata(pdev);

	/* Undo probe in reverse order: IRQ, input device, state. */
	free_irq(onkey->irq, onkey);
	input_unregister_device(onkey->idev);
	kfree(onkey);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int pm860x_onkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag |= 1 << PM8607_IRQ_ONKEY;
return 0;
}
static int pm860x_onkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
if (device_may_wakeup(dev))
chip->wakeup_flag &= ~(1 << PM8607_IRQ_ONKEY);
return 0;
}
#endif
/* Standard suspend/resume hooks (no-ops unless CONFIG_PM_SLEEP). */
static SIMPLE_DEV_PM_OPS(pm860x_onkey_pm_ops, pm860x_onkey_suspend, pm860x_onkey_resume);

static struct platform_driver pm860x_onkey_driver = {
	.driver		= {
		.name	= "88pm860x-onkey",
		.owner	= THIS_MODULE,
		.pm	= &pm860x_onkey_pm_ops,
	},
	.probe		= pm860x_onkey_probe,
	.remove		= __devexit_p(pm860x_onkey_remove),
};
module_platform_driver(pm860x_onkey_driver);
MODULE_DESCRIPTION("Marvell 88PM860x ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
yemingxing/X9180_kernel | drivers/media/video/saa7134/saa7134-tvaudio.c | 5237 | 29313 | /*
*
* device driver for philips saa7134 based TV cards
* tv audio decoder (fm stereo, nicam, ...)
*
* (c) 2001-03 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <asm/div64.h>
#include "saa7134-reg.h"
#include "saa7134.h"
/* ------------------------------------------------------------------ */
static unsigned int audio_debug;
module_param(audio_debug, int, 0644);
MODULE_PARM_DESC(audio_debug,"enable debug messages [tv audio]");
static unsigned int audio_ddep;
module_param(audio_ddep, int, 0644);
MODULE_PARM_DESC(audio_ddep,"audio ddep overwrite");
static int audio_clock_override = UNSET;
module_param(audio_clock_override, int, 0644);
static int audio_clock_tweak;
module_param(audio_clock_tweak, int, 0644);
MODULE_PARM_DESC(audio_clock_tweak, "Audio clock tick fine tuning for cards with audio crystal that's slightly off (range [-1024 .. 1024])");
#define dprintk(fmt, arg...) if (audio_debug) \
printk(KERN_DEBUG "%s/audio: " fmt, dev->name , ## arg)
#define d2printk(fmt, arg...) if (audio_debug > 1) \
printk(KERN_DEBUG "%s/audio: " fmt, dev->name, ## arg)
#define print_regb(reg) printk("%s: reg 0x%03x [%-16s]: 0x%02x\n", \
dev->name,(SAA7134_##reg),(#reg),saa_readb((SAA7134_##reg)))
/* msecs */
#define SCAN_INITIAL_DELAY 1000
#define SCAN_SAMPLE_DELAY 200
#define SCAN_SUBCARRIER_DELAY 2000
/* ------------------------------------------------------------------ */
/* saa7134 code */
/* Main audio carrier candidates, one per family of TV standards.
 * The scan thread probes each entry whose .std matches the current
 * norm (see tvaudio_checkcarrier()).  .carr is in kHz. */
static struct mainscan {
	char         *name;	/* tag used in debug messages */
	v4l2_std_id  std;	/* norms this carrier belongs to */
	int          carr;	/* carrier frequency [kHz] */
} mainscan[] = {
	{
		.name = "MN",
		.std  = V4L2_STD_MN,
		.carr = 4500,
	},{
		.name = "BGH",
		.std  = V4L2_STD_B | V4L2_STD_GH,
		.carr = 5500,
	},{
		.name = "I",
		.std  = V4L2_STD_PAL_I,
		.carr = 6000,
	},{
		.name = "DKL",
		.std  = V4L2_STD_DK | V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC,
		.carr = 6500,
	}
};
/* Audio standard descriptions: each entry combines a main/secondary
 * carrier pair [kHz] with a demodulator mode.  carr2 == -1 means "no
 * secondary carrier" (tvaudio_setcarrier() then reuses carr1). */
static struct saa7134_tvaudio tvaudio[] = {
	{
		.name  = "PAL-B/G FM-stereo",
		.std   = V4L2_STD_PAL_BG,
		.mode  = TVAUDIO_FM_BG_STEREO,
		.carr1 = 5500,
		.carr2 = 5742,
	},{
		.name  = "PAL-D/K1 FM-stereo",
		.std   = V4L2_STD_PAL_DK,
		.carr1 = 6500,
		.carr2 = 6258,
		.mode  = TVAUDIO_FM_BG_STEREO,
	},{
		.name  = "PAL-D/K2 FM-stereo",
		.std   = V4L2_STD_PAL_DK,
		.carr1 = 6500,
		.carr2 = 6742,
		.mode  = TVAUDIO_FM_BG_STEREO,
	},{
		.name  = "PAL-D/K3 FM-stereo",
		.std   = V4L2_STD_PAL_DK,
		.carr1 = 6500,
		.carr2 = 5742,
		.mode  = TVAUDIO_FM_BG_STEREO,
	},{
		.name  = "PAL-B/G NICAM",
		.std   = V4L2_STD_PAL_BG,
		.carr1 = 5500,
		.carr2 = 5850,
		.mode  = TVAUDIO_NICAM_FM,
	},{
		.name  = "PAL-I NICAM",
		.std   = V4L2_STD_PAL_I,
		.carr1 = 6000,
		.carr2 = 6552,
		.mode  = TVAUDIO_NICAM_FM,
	},{
		.name  = "PAL-D/K NICAM",
		.std   = V4L2_STD_PAL_DK,
		.carr1 = 6500,
		.carr2 = 5850,
		.mode  = TVAUDIO_NICAM_FM,
	},{
		.name  = "SECAM-L NICAM",
		.std   = V4L2_STD_SECAM_L,
		.carr1 = 6500,
		.carr2 = 5850,
		.mode  = TVAUDIO_NICAM_AM,
	},{
		.name  = "SECAM-D/K NICAM",
		.std   = V4L2_STD_SECAM_DK,
		.carr1 = 6500,
		.carr2 = 5850,
		.mode  = TVAUDIO_NICAM_FM,
	},{
		.name  = "NTSC-A2 FM-stereo",
		.std   = V4L2_STD_NTSC,
		.carr1 = 4500,
		.carr2 = 4724,
		.mode  = TVAUDIO_FM_K_STEREO,
	},{
		.name  = "NTSC-M",
		.std   = V4L2_STD_NTSC,
		.carr1 = 4500,
		.carr2 = -1,
		.mode  = TVAUDIO_FM_MONO,
	}
};
#define TVAUDIO ARRAY_SIZE(tvaudio)
/* ------------------------------------------------------------------ */
/* Convert a carrier frequency in kHz to the chip's 24.8-style
 * register value: reg = carrier * 2^24 / 12288, in 64-bit math. */
static u32 tvaudio_carr2reg(u32 carrier)
{
	u64 reg = (u64)carrier << 24;

	do_div(reg, 12288);
	return reg;
}
/* Program both carrier frequency units; a secondary of -1 means
 * "single carrier" and duplicates the primary. */
static void tvaudio_setcarrier(struct saa7134_dev *dev,
			       int primary, int secondary)
{
	if (secondary == -1)
		secondary = primary;
	saa_writel(SAA7134_CARRIER1_FREQ0 >> 2, tvaudio_carr2reg(primary));
	saa_writel(SAA7134_CARRIER2_FREQ0 >> 2, tvaudio_carr2reg(secondary));
}
#define SAA7134_MUTE_MASK 0xbb
#define SAA7134_MUTE_ANALOG 0x04
#define SAA7134_MUTE_I2S 0x40
/*
 * Route the audio input selected in dev->input through the chip's
 * internal (and, where wired, the GPIO-driven external) audio muxes
 * and apply the current mute state.  Short-circuits when the hardware
 * already matches the requested mute/input pair, except while
 * resuming (dev->insuspend).
 */
static void mute_input_7134(struct saa7134_dev *dev)
{
	unsigned int mute;
	struct saa7134_input *in;
	int ausel=0, ics=0, ocs=0;
	int mask;

	/* look what is to do ... */
	in = dev->input;
	mute = (dev->ctl_mute ||
		(dev->automute && (&card(dev).radio) != in));
	if (card(dev).mute.name) {
		/*
		 * 7130 - we'll mute using some unconnected audio input
		 * 7134 - we'll probably should switch external mux with gpio
		 */
		if (mute)
			in = &card(dev).mute;
	}

	if (dev->hw_mute == mute &&
	    dev->hw_input == in && !dev->insuspend) {
		dprintk("mute/input: nothing to do [mute=%d,input=%s]\n",
			mute,in->name);
		return;
	}

	dprintk("ctl_mute=%d automute=%d input=%s => mute=%d input=%s\n",
		dev->ctl_mute,dev->automute,dev->input->name,mute,in->name);
	dev->hw_mute  = mute;
	dev->hw_input = in;

	if (PCI_DEVICE_ID_PHILIPS_SAA7134 == dev->pci->device)
		/* 7134 mute */
		saa_writeb(SAA7134_AUDIO_MUTE_CTRL, mute ?
						    SAA7134_MUTE_MASK |
						    SAA7134_MUTE_ANALOG |
						    SAA7134_MUTE_I2S :
						    SAA7134_MUTE_MASK);

	/* switch internal audio mux */
	switch (in->amux) {
	case TV:         ausel=0xc0; ics=0x00; ocs=0x02; break;
	case LINE1:      ausel=0x80; ics=0x00; ocs=0x00; break;
	case LINE2:      ausel=0x80; ics=0x08; ocs=0x01; break;
	case LINE2_LEFT: ausel=0x80; ics=0x08; ocs=0x05; break;
	}
	saa_andorb(SAA7134_AUDIO_FORMAT_CTRL, 0xc0, ausel);
	saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x08, ics);
	saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, ocs);
	// for oss, we need to change the clock configuration
	if (in->amux == TV)
		saa_andorb(SAA7134_SIF_SAMPLE_FREQ,   0x03, 0x00);
	else
		saa_andorb(SAA7134_SIF_SAMPLE_FREQ,   0x03, 0x01);

	/* switch gpio-connected external audio mux */
	if (0 == card(dev).gpiomask)
		return;

	mask = card(dev).gpiomask;
	saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask);
	saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio);
	saa7134_track_gpio(dev,in->name);
}
/*
 * Program the audio DSP for one tvaudio[] table entry: the audio
 * clocks-per-field value, both carriers, and the demodulator /
 * deemphasis / DAC routing for the entry's mode.  @note, when
 * non-NULL, is only a tag for the debug message.  The acpf base is
 * norm dependent (NTSC uses the smaller count) and can be fine-tuned
 * via the audio_clock_tweak module parameter.
 */
static void tvaudio_setmode(struct saa7134_dev *dev,
			    struct saa7134_tvaudio *audio,
			    char *note)
{
	int acpf, tweak = 0;

	if (dev->tvnorm->id == V4L2_STD_NTSC) {
		acpf = 0x19066;
	} else {
		acpf = 0x1e000;
	}
	if (audio_clock_tweak > -1024 && audio_clock_tweak < 1024)
		tweak = audio_clock_tweak;

	if (note)
		dprintk("tvaudio_setmode: %s %s [%d.%03d/%d.%03d MHz] acpf=%d%+d\n",
			note,audio->name,
			audio->carr1 / 1000, audio->carr1 % 1000,
			audio->carr2 / 1000, audio->carr2 % 1000,
			acpf, tweak);

	acpf += tweak;
	/* acpf is split over three registers: low byte, mid byte and
	 * the top two bits (mask 0x030000). */
	saa_writeb(SAA7134_AUDIO_CLOCKS_PER_FIELD0, (acpf & 0x0000ff) >> 0);
	saa_writeb(SAA7134_AUDIO_CLOCKS_PER_FIELD1, (acpf & 0x00ff00) >> 8);
	saa_writeb(SAA7134_AUDIO_CLOCKS_PER_FIELD2, (acpf & 0x030000) >> 16);
	tvaudio_setcarrier(dev,audio->carr1,audio->carr2);

	switch (audio->mode) {
	case TVAUDIO_FM_MONO:
	case TVAUDIO_FM_BG_STEREO:
		saa_writeb(SAA7134_DEMODULATOR,               0x00);
		saa_writeb(SAA7134_DCXO_IDENT_CTRL,           0x00);
		saa_writeb(SAA7134_FM_DEEMPHASIS,             0x22);
		saa_writeb(SAA7134_FM_DEMATRIX,               0x80);
		saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT,  0xa0);
		break;
	case TVAUDIO_FM_K_STEREO:
		saa_writeb(SAA7134_DEMODULATOR,               0x00);
		saa_writeb(SAA7134_DCXO_IDENT_CTRL,           0x01);
		saa_writeb(SAA7134_FM_DEEMPHASIS,             0x22);
		saa_writeb(SAA7134_FM_DEMATRIX,               0x80);
		saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT,  0xa0);
		break;
	case TVAUDIO_NICAM_FM:
		saa_writeb(SAA7134_DEMODULATOR,               0x10);
		saa_writeb(SAA7134_DCXO_IDENT_CTRL,           0x00);
		saa_writeb(SAA7134_FM_DEEMPHASIS,             0x44);
		saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT,  0xa1);
		saa_writeb(SAA7134_NICAM_CONFIG,              0x00);
		break;
	case TVAUDIO_NICAM_AM:
		saa_writeb(SAA7134_DEMODULATOR,               0x12);
		saa_writeb(SAA7134_DCXO_IDENT_CTRL,           0x00);
		saa_writeb(SAA7134_FM_DEEMPHASIS,             0x44);
		saa_writeb(SAA7134_STEREO_DAC_OUTPUT_SELECT,  0xa1);
		saa_writeb(SAA7134_NICAM_CONFIG,              0x00);
		break;
	case TVAUDIO_FM_SAT_STEREO:
		/* not implemented (yet) */
		break;
	}
}
/*
 * Wait for @timeout ms (forever when negative) unless a rescan has
 * been requested or the thread is being stopped.  Returns non-zero
 * when a rescan became pending (scan1 != scan2), which tells the
 * caller to restart its scan loop.
 */
static int tvaudio_sleep(struct saa7134_dev *dev, int timeout)
{
	int rescan_pending = (dev->thread.scan1 != dev->thread.scan2);

	if (!rescan_pending && !kthread_should_stop()) {
		if (timeout < 0) {
			/* Sleep until explicitly woken. */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		} else {
			schedule_timeout_interruptible(
				msecs_to_jiffies(timeout));
		}
	}
	/* Re-check after sleeping: a rescan may have just arrived. */
	return dev->thread.scan1 != dev->thread.scan2;
}
/*
 * Measure how strongly a main-carrier candidate is present: tune
 * 90 kHz below and above scan->carr and return the absolute DC
 * difference of the two level readouts (a real carrier yields a large
 * difference).  Returns 0 when the candidate's standard does not
 * match the current norm, -1 when a rescan was requested mid-sample.
 */
static int tvaudio_checkcarrier(struct saa7134_dev *dev, struct mainscan *scan)
{
	__s32 left,right,value;

	if (!(dev->tvnorm->id & scan->std)) {
		value = 0;
		dprintk("skipping %d.%03d MHz [%4s]\n",
			scan->carr / 1000, scan->carr % 1000, scan->name);
		return 0;
	}

	if (audio_debug > 1) {
		/* Extra diagnostics: sweep +-150 kHz around the carrier. */
		int i;
		dprintk("debug %d:",scan->carr);
		for (i = -150; i <= 150; i += 30) {
			tvaudio_setcarrier(dev,scan->carr+i,scan->carr+i);
			saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
			if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY))
				return -1;
			value = saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
			if (0 == i)
				printk(" # %6d # ",value >> 16);
			else
				printk(" %6d",value >> 16);
		}
		printk("\n");
	}

	/* The first readl flushes the stale readout; the one after the
	 * settle delay is the sample actually used. */
	tvaudio_setcarrier(dev,scan->carr-90,scan->carr-90);
	saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
	if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY))
		return -1;
	left = saa_readl(SAA7134_LEVEL_READOUT1 >> 2);

	tvaudio_setcarrier(dev,scan->carr+90,scan->carr+90);
	saa_readl(SAA7134_LEVEL_READOUT1 >> 2);
	if (tvaudio_sleep(dev,SCAN_SAMPLE_DELAY))
		return -1;
	right = saa_readl(SAA7134_LEVEL_READOUT1 >> 2);

	left >>= 16;
	right >>= 16;
	value = left > right ? left - right : right - left;
	dprintk("scanning %d.%03d MHz [%4s] => dc is %5d [%d/%d]\n",
		scan->carr / 1000, scan->carr % 1000,
		scan->name, value, left, right);
	return value;
}
/*
 * Read back which audio subchannels the demodulator has identified
 * for @audio's mode.  Returns a bitmask of V4L2_TUNER_SUB_* flags, or
 * -1 when nothing could be identified (or the mode has no detection).
 */
static int tvaudio_getstereo(struct saa7134_dev *dev, struct saa7134_tvaudio *audio)
{
	__u32 idp, nicam, nicam_status;
	int retval = -1;

	switch (audio->mode) {
	case TVAUDIO_FM_MONO:
		/* Mono by definition, no hardware query needed. */
		return V4L2_TUNER_SUB_MONO;
	case TVAUDIO_FM_K_STEREO:
	case TVAUDIO_FM_BG_STEREO:
		/* A2 pilot identification bits live in IDENT_SIF[7:5]. */
		idp = (saa_readb(SAA7134_IDENT_SIF) & 0xe0) >> 5;
		dprintk("getstereo: fm/stereo: idp=0x%x\n",idp);
		if (0x03 == (idp & 0x03))
			retval = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
		else if (0x05 == (idp & 0x05))
			retval = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
		else if (0x01 == (idp & 0x01))
			retval = V4L2_TUNER_SUB_MONO;
		break;
	case TVAUDIO_FM_SAT_STEREO:
		/* not implemented (yet) */
		break;
	case TVAUDIO_NICAM_FM:
	case TVAUDIO_NICAM_AM:
		/* Bit 0 of AUDIO_STATUS flags a decodable NICAM stream;
		 * NICAM_STATUS[1:0] then encodes the channel layout. */
		nicam = saa_readb(SAA7134_AUDIO_STATUS);
		dprintk("getstereo: nicam=0x%x\n",nicam);
		if (nicam & 0x1) {
			nicam_status = saa_readb(SAA7134_NICAM_STATUS);
			dprintk("getstereo: nicam_status=0x%x\n", nicam_status);
			switch (nicam_status & 0x03) {
			    case 0x01:
				retval = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
				break;
			    case 0x02:
				retval = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
				break;
			    default:
				retval = V4L2_TUNER_SUB_MONO;
			}
		} else {
			/* No nicam detected */
		}
		break;
	}

	if (retval != -1)
		dprintk("found audio subchannels:%s%s%s%s\n",
			(retval & V4L2_TUNER_SUB_MONO)   ? " mono"   : "",
			(retval & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
			(retval & V4L2_TUNER_SUB_LANG1)  ? " lang1"  : "",
			(retval & V4L2_TUNER_SUB_LANG2)  ? " lang2"  : "");
	return retval;
}
/*
 * Select the requested V4L2 audio mode on the saa7130/34 software
 * decoding path by programming the FM dematrix register.  Modes with
 * no selectable sub-channels (plain mono, unimplemented SAT stereo)
 * are left untouched.  Always returns 0.
 */
static int tvaudio_setstereo(struct saa7134_dev *dev, struct saa7134_tvaudio *audio,
			     u32 mode)
{
	/* mode names for debug logging, indexed by V4L2_TUNER_MODE_* */
	static char *name[] = {
		[ V4L2_TUNER_MODE_MONO ] = "mono",
		[ V4L2_TUNER_MODE_STEREO ] = "stereo",
		[ V4L2_TUNER_MODE_LANG1 ] = "lang1",
		[ V4L2_TUNER_MODE_LANG2 ] = "lang2",
		[ V4L2_TUNER_MODE_LANG1_LANG2 ] = "lang1+lang2",
	};
	/* dematrix register value per mode */
	static u32 fm[] = {
		[ V4L2_TUNER_MODE_MONO ] = 0x00, /* ch1 */
		[ V4L2_TUNER_MODE_STEREO ] = 0x80, /* auto */
		[ V4L2_TUNER_MODE_LANG1 ] = 0x00, /* ch1 */
		[ V4L2_TUNER_MODE_LANG2 ] = 0x01, /* ch2 */
		[ V4L2_TUNER_MODE_LANG1_LANG2 ] = 0x80, /* auto */
	};
	u32 dematrix;

	switch (audio->mode) {
	case TVAUDIO_FM_K_STEREO:
	case TVAUDIO_FM_BG_STEREO:
	case TVAUDIO_NICAM_AM:
	case TVAUDIO_NICAM_FM:
		dprintk("setstereo [fm] => %s\n",
			name[ mode % ARRAY_SIZE(name) ]);
		dematrix = fm[ mode % ARRAY_SIZE(fm) ];
		saa_writeb(SAA7134_FM_DEMATRIX, dematrix);
		break;
	case TVAUDIO_FM_MONO:
	case TVAUDIO_FM_SAT_STEREO:
		/* nothing to do / not implemented */
		break;
	}
	return 0;
}
/*
 * Kernel thread implementing software audio decoding for saa7130/34:
 * scans for the main sound carrier, identifies the exact TV audio
 * norm, then keeps monitoring the received sub-channels and adjusts
 * the stereo/mono setting.  Woken whenever dev->thread.scan2 is
 * bumped (see saa7134_tvaudio_do_scan()); exits on kthread_stop().
 */
static int tvaudio_thread(void *data)
{
	struct saa7134_dev *dev = data;
	int carr_vals[ARRAY_SIZE(mainscan)];
	unsigned int i, audio, nscan;
	int max1,max2,carrier,rx,mode,lastmode,default_carrier;

	set_freezable();
	for (;;) {
		/* sleep until a new scan is requested */
		tvaudio_sleep(dev,-1);
		if (kthread_should_stop())
			goto done;
	restart:
		try_to_freeze();
		/* latch the scan generation we are servicing; a mismatch
		 * with scan2 later means a new request arrived mid-scan */
		dev->thread.scan1 = dev->thread.scan2;
		dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1);
		dev->tvaudio = NULL;
		saa_writeb(SAA7134_MONITOR_SELECT, 0xa0);
		saa_writeb(SAA7134_FM_DEMATRIX, 0x80);
		if (dev->ctl_automute)
			dev->automute = 1;
		mute_input_7134(dev);
		/* give the tuner some time */
		if (tvaudio_sleep(dev,SCAN_INITIAL_DELAY))
			goto restart;
		max1 = 0;
		max2 = 0;
		nscan = 0;
		carrier = 0;
		default_carrier = 0;
		/* collect the carrier candidates valid for the current norm */
		for (i = 0; i < ARRAY_SIZE(mainscan); i++) {
			if (!(dev->tvnorm->id & mainscan[i].std))
				continue;
			if (!default_carrier)
				default_carrier = mainscan[i].carr;
			nscan++;
		}
		if (1 == nscan) {
			/* only one candidate -- skip scan ;) */
			dprintk("only one main carrier candidate - skipping scan\n");
			max1 = 12345;
			carrier = default_carrier;
		} else {
			/* scan for the main carrier */
			saa_writeb(SAA7134_MONITOR_SELECT,0x00);
			tvaudio_setmode(dev,&tvaudio[0],NULL);
			for (i = 0; i < ARRAY_SIZE(mainscan); i++) {
				carr_vals[i] = tvaudio_checkcarrier(dev, mainscan+i);
				/* abort if a newer scan request arrived */
				if (dev->thread.scan1 != dev->thread.scan2)
					goto restart;
			}
			/* pick the strongest (max1) and runner-up (max2) */
			for (max1 = 0, max2 = 0, i = 0; i < ARRAY_SIZE(mainscan); i++) {
				if (max1 < carr_vals[i]) {
					max2 = max1;
					max1 = carr_vals[i];
					carrier = mainscan[i].carr;
				} else if (max2 < carr_vals[i]) {
					max2 = carr_vals[i];
				}
			}
		}
		/* accept only if the winner is strong and clearly dominant */
		if (0 != carrier && max1 > 2000 && max1 > max2*3) {
			/* found good carrier */
			dprintk("found %s main sound carrier @ %d.%03d MHz [%d/%d]\n",
				dev->tvnorm->name, carrier/1000, carrier%1000,
				max1, max2);
			dev->last_carrier = carrier;
			dev->automute = 0;
		} else if (0 != dev->last_carrier) {
			/* no carrier -- try last detected one as fallback */
			carrier = dev->last_carrier;
			dprintk("audio carrier scan failed, "
				"using %d.%03d MHz [last detected]\n",
				carrier/1000, carrier%1000);
			dev->automute = 1;
		} else {
			/* no carrier + no fallback -- use default */
			carrier = default_carrier;
			dprintk("audio carrier scan failed, "
				"using %d.%03d MHz [default]\n",
				carrier/1000, carrier%1000);
			dev->automute = 1;
		}
		tvaudio_setcarrier(dev,carrier,carrier);
		saa_andorb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0x30, 0x00);
		saa7134_tvaudio_setmute(dev);
		/* find the exact tv audio norm */
		for (audio = UNSET, i = 0; i < TVAUDIO; i++) {
			if (dev->tvnorm->id != UNSET &&
			    !(dev->tvnorm->id & tvaudio[i].std))
				continue;
			if (tvaudio[i].carr1 != carrier)
				continue;
			/* Note: at least the primary carrier is right here */
			if (UNSET == audio)
				audio = i;
			tvaudio_setmode(dev,&tvaudio[i],"trying");
			if (tvaudio_sleep(dev,SCAN_SUBCARRIER_DELAY))
				goto restart;
			if (-1 != tvaudio_getstereo(dev,&tvaudio[i])) {
				audio = i;
				break;
			}
		}
		saa_andorb(SAA7134_STEREO_DAC_OUTPUT_SELECT, 0x30, 0x30);
		if (UNSET == audio)
			continue;	/* no usable norm: wait for next scan */
		tvaudio_setmode(dev,&tvaudio[audio],"using");
		tvaudio_setstereo(dev,&tvaudio[audio],V4L2_TUNER_MODE_MONO);
		dev->tvaudio = &tvaudio[audio];
		/* monitor loop: re-check the received sub-channels every 5s
		 * and switch mode when it changes (42 == "never matches") */
		lastmode = 42;
		for (;;) {
			try_to_freeze();
			if (tvaudio_sleep(dev,5000))
				goto restart;
			if (kthread_should_stop())
				break;
			if (UNSET == dev->thread.mode) {
				/* automatic: follow what is being received */
				rx = tvaudio_getstereo(dev, &tvaudio[audio]);
				mode = saa7134_tvaudio_rx2mode(rx);
			} else {
				/* user-forced mode */
				mode = dev->thread.mode;
			}
			if (lastmode != mode) {
				tvaudio_setstereo(dev,&tvaudio[audio],mode);
				lastmode = mode;
			}
		}
	}
 done:
	dev->thread.stopped = 1;
	return 0;
}
/* ------------------------------------------------------------------ */
/* saa7133 / saa7135 code */
/* Human-readable decoding of the 5-bit standard-detection field
 * (status register 0x528 & 0x1f) reported by the saa7133/35 DSP;
 * used for debug logging in tvaudio_thread_ddep(). */
static char *stdres[0x20] = {
	[0x00] = "no standard detected",
	[0x01] = "B/G (in progress)",
	[0x02] = "D/K (in progress)",
	[0x03] = "M (in progress)",
	[0x04] = "B/G A2",
	[0x05] = "B/G NICAM",
	[0x06] = "D/K A2 (1)",
	[0x07] = "D/K A2 (2)",
	[0x08] = "D/K A2 (3)",
	[0x09] = "D/K NICAM",
	[0x0a] = "L NICAM",
	[0x0b] = "I NICAM",
	[0x0c] = "M Korea",
	[0x0d] = "M BTSC ",
	[0x0e] = "M EIAJ",
	[0x0f] = "FM radio / IF 10.7 / 50 deemp",
	[0x10] = "FM radio / IF 10.7 / 75 deemp",
	[0x11] = "FM radio / IF sel / 50 deemp",
	[0x12] = "FM radio / IF sel / 75 deemp",
	[0x13 ... 0x1e ] = "unknown",
	[0x1f] = "??? [in progress]",
};
#define DSP_RETRY 32	/* max polls of a DSP ready bit before timeout */
#define DSP_DELAY 16	/* delay between polls (passed to saa_wait()) */
#define SAA7135_DSP_RWCLEAR_RERR 1

/* Clear a latched DSP read/write error so later accesses can proceed.
 * Always returns 0. */
static inline int saa_dsp_reset_error_bit(struct saa7134_dev *dev)
{
	int state = saa_readb(SAA7135_DSP_RWSTATE);
	if (unlikely(state & SAA7135_DSP_RWSTATE_ERR)) {
		d2printk("%s: resetting error bit\n", dev->name);
		saa_writeb(SAA7135_DSP_RWCLEAR, SAA7135_DSP_RWCLEAR_RERR);
	}
	return 0;
}
/*
 * Poll the DSP read/write state register until @bit is set.
 * Returns 0 on success, -EIO on a latched DSP error (which is also
 * cleared) or after DSP_RETRY unsuccessful polls.
 */
static inline int saa_dsp_wait_bit(struct saa7134_dev *dev, int bit)
{
	int tries = DSP_RETRY;
	int state = saa_readb(SAA7135_DSP_RWSTATE);

	if (unlikely(state & SAA7135_DSP_RWSTATE_ERR)) {
		printk(KERN_WARNING "%s: dsp access error\n", dev->name);
		saa_dsp_reset_error_bit(dev);
		return -EIO;
	}
	while (!(state & bit)) {
		if (unlikely(tries == 0)) {
			printk("%s: dsp access wait timeout [bit=%s]\n",
			       dev->name,
			       (bit & SAA7135_DSP_RWSTATE_WRR) ? "WRR" :
			       (bit & SAA7135_DSP_RWSTATE_RDB) ? "RDB" :
			       (bit & SAA7135_DSP_RWSTATE_IDA) ? "IDA" :
			       "???");
			return -EIO;
		}
		saa_wait(DSP_DELAY);
		state = saa_readb(SAA7135_DSP_RWSTATE);
		tries--;
	}
	return 0;
}
/*
 * Write one 32-bit value into a saa7133/35 DSP register, waiting for
 * the DSP to be ready for writes both before and after the access.
 * Returns 0 on success or a negative errno from saa_dsp_wait_bit().
 */
int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
{
	int rc;

	d2printk("dsp write reg 0x%x = 0x%06x\n",reg<<2,value);
	rc = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
	if (rc < 0)
		return rc;
	saa_writel(reg,value);
	rc = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
	return (rc < 0) ? rc : 0;
}
/*
 * Read the saa7133/35 audio status word and report the received
 * sub-channels as a V4L2_TUNER_SUB_* bitmask (mono if neither the
 * stereo nor the dual-language status bit is set).
 */
static int getstereo_7133(struct saa7134_dev *dev)
{
	u32 status = saa_readl(0x528 >> 2);
	int rx = V4L2_TUNER_SUB_MONO;

	if (status & 0x20)
		rx = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
	if (status & 0x40)
		rx = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
	return rx;
}
/*
 * Route the currently selected input through the saa7133/35 DSP audio
 * crossbars, honouring the global mute control, and switch any
 * GPIO-driven external audio mux the board defines.  Always returns 0.
 */
static int mute_input_7133(struct saa7134_dev *dev)
{
	u32 reg = 0;
	u32 xbarin, xbarout;
	int mask;
	struct saa7134_input *in;

	/* crossbar/input selection values per analog mux source
	 * (register semantics per saa7133 datasheet - NOTE(review): verify) */
	xbarin = 0x03;
	switch (dev->input->amux) {
	case TV:
		reg = 0x02;
		xbarin = 0;
		break;
	case LINE1:
		reg = 0x00;
		break;
	case LINE2:
	case LINE2_LEFT:
		reg = 0x09;
		break;
	}
	saa_dsp_writel(dev, 0x464 >> 2, xbarin);	/* input crossbar */
	if (dev->ctl_mute) {
		reg = 0x07;
		/* presumably disconnects all outputs - verify vs. datasheet */
		xbarout = 0xbbbbbb;
	} else
		xbarout = 0xbbbb10;
	saa_dsp_writel(dev, 0x46c >> 2, xbarout);	/* output crossbar */
	saa_writel(0x594 >> 2, reg);
	/* switch gpio-connected external audio mux */
	if (0 != card(dev).gpiomask) {
		mask = card(dev).gpiomask;
		/* use the board's dedicated mute input when muting */
		if (card(dev).mute.name && dev->ctl_mute)
			in = &card(dev).mute;
		else
			in = dev->input;
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio);
		saa7134_track_gpio(dev,in->name);
	}
	return 0;
}
/*
 * Kernel thread for saa7133/35 chips, which carry an on-chip DSP
 * ("ddep" = digital demodulator) that detects the audio standard by
 * itself.  This thread only seeds the standard search mask, kicks the
 * detection and logs what the DSP reports.  Woken per scan request;
 * exits on kthread_stop().
 */
static int tvaudio_thread_ddep(void *data)
{
	struct saa7134_dev *dev = data;
	u32 value, norms;

	set_freezable();
	for (;;) {
		/* sleep until a new scan is requested */
		tvaudio_sleep(dev,-1);
		if (kthread_should_stop())
			goto done;
	restart:
		try_to_freeze();
		dev->thread.scan1 = dev->thread.scan2;
		dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1);
		if (audio_ddep >= 0x04 && audio_ddep <= 0x0e) {
			/* insmod option override */
			norms = (audio_ddep << 2) | 0x01;
			dprintk("ddep override: %s\n",stdres[audio_ddep]);
		} else if (&card(dev).radio == dev->input) {
			dprintk("FM Radio\n");
			if (dev->tuner_type == TUNER_PHILIPS_TDA8290) {
				/* "FM radio / IF sel" standard, see stdres[] */
				norms = (0x11 << 2) | 0x01;
				saa_dsp_writel(dev, 0x42c >> 2, 0x729555);
			} else {
				norms = (0x0f << 2) | 0x01;
			}
		} else {
			/* (let chip) scan for sound carrier */
			norms = 0;
			if (dev->tvnorm->id & (V4L2_STD_B | V4L2_STD_GH))
				norms |= 0x04;
			if (dev->tvnorm->id & V4L2_STD_PAL_I)
				norms |= 0x20;
			if (dev->tvnorm->id & V4L2_STD_DK)
				norms |= 0x08;
			if (dev->tvnorm->id & V4L2_STD_MN)
				norms |= 0x40;
			if (dev->tvnorm->id & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))
				norms |= 0x10;
			if (0 == norms)
				norms = 0x7c; /* all */
			dprintk("scanning:%s%s%s%s%s\n",
				(norms & 0x04) ? " B/G" : "",
				(norms & 0x08) ? " D/K" : "",
				(norms & 0x10) ? " L/L'" : "",
				(norms & 0x20) ? " I" : "",
				(norms & 0x40) ? " M" : "");
		}
		/* kick automatic standard detection */
		saa_dsp_writel(dev, 0x454 >> 2, 0);
		saa_dsp_writel(dev, 0x454 >> 2, norms | 0x80);
		/* setup crossbars */
		saa_dsp_writel(dev, 0x464 >> 2, 0x000000);
		saa_dsp_writel(dev, 0x470 >> 2, 0x101010);
		/* give the DSP time to settle; restart on a new scan request */
		if (tvaudio_sleep(dev,3000))
			goto restart;
		/* read back and log the detection result */
		value = saa_readl(0x528 >> 2) & 0xffffff;
		dprintk("tvaudio thread status: 0x%x [%s%s%s]\n",
			value, stdres[value & 0x1f],
			(value & 0x000020) ? ",stereo" : "",
			(value & 0x000040) ? ",dual" : "");
		dprintk("detailed status: "
			"%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s\n",
			(value & 0x000080) ? " A2/EIAJ pilot tone " : "",
			(value & 0x000100) ? " A2/EIAJ dual " : "",
			(value & 0x000200) ? " A2/EIAJ stereo " : "",
			(value & 0x000400) ? " A2/EIAJ noise mute " : "",
			(value & 0x000800) ? " BTSC/FM radio pilot " : "",
			(value & 0x001000) ? " SAP carrier " : "",
			(value & 0x002000) ? " BTSC stereo noise mute " : "",
			(value & 0x004000) ? " SAP noise mute " : "",
			(value & 0x008000) ? " VDSP " : "",
			(value & 0x010000) ? " NICST " : "",
			(value & 0x020000) ? " NICDU " : "",
			(value & 0x040000) ? " NICAM muted " : "",
			(value & 0x080000) ? " NICAM reserve sound " : "",
			(value & 0x100000) ? " init done " : "");
	}
 done:
	dev->thread.stopped = 1;
	return 0;
}
/* ------------------------------------------------------------------ */
/* common stuff + external entry points */
/*
 * Route decoded audio to the I2S output feeding the on-board MPEG
 * encoder.  Only relevant for "empress" cards; the saa7130 is skipped
 * because it lacks the audio path used here.
 *
 * Fix: the PCI_DEVICE_ID_PHILIPS_SAA7134 case previously fell through
 * into default without a break.  Harmless today (default was empty),
 * but fragile against future additions - an explicit break is added.
 */
void saa7134_enable_i2s(struct saa7134_dev *dev)
{
	int i2s_format;

	if (!card_is_empress(dev))
		return;
	if (dev->pci->device == PCI_DEVICE_ID_PHILIPS_SAA7130)
		return;

	/* configure GPIO for out */
	saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0E000000, 0x00000000);

	switch (dev->pci->device) {
	case PCI_DEVICE_ID_PHILIPS_SAA7133:
	case PCI_DEVICE_ID_PHILIPS_SAA7135:
		/* Set I2S format (SONY) */
		saa_writeb(SAA7133_I2S_AUDIO_CONTROL, 0x00);
		/* Start I2S */
		saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x11);
		break;
	case PCI_DEVICE_ID_PHILIPS_SAA7134:
		i2s_format = (dev->input->amux == TV) ? 0x00 : 0x01;
		/* enable I2S audio output for the mpeg encoder */
		saa_writeb(SAA7134_I2S_OUTPUT_SELECT, 0x80);
		saa_writeb(SAA7134_I2S_OUTPUT_FORMAT, i2s_format);
		saa_writeb(SAA7134_I2S_OUTPUT_LEVEL, 0x0F);
		saa_writeb(SAA7134_I2S_AUDIO_OUTPUT, 0x01);
		break;
	default:
		break;
	}
}
/*
 * Map a V4L2_TUNER_SUB_* reception bitmask to the single
 * V4L2_TUNER_MODE_* to select, preferring stereo, then the primary,
 * then the secondary language; mono is the fallback.
 */
int saa7134_tvaudio_rx2mode(u32 rx)
{
	if (rx & V4L2_TUNER_SUB_STEREO)
		return V4L2_TUNER_MODE_STEREO;
	if (rx & V4L2_TUNER_SUB_LANG1)
		return V4L2_TUNER_MODE_LANG1;
	if (rx & V4L2_TUNER_SUB_LANG2)
		return V4L2_TUNER_MODE_LANG2;
	return V4L2_TUNER_MODE_MONO;
}
/* Apply the current mute state via the chip-specific mute routine. */
void saa7134_tvaudio_setmute(struct saa7134_dev *dev)
{
	unsigned short id = dev->pci->device;

	if (id == PCI_DEVICE_ID_PHILIPS_SAA7130 ||
	    id == PCI_DEVICE_ID_PHILIPS_SAA7134)
		mute_input_7134(dev);
	else if (id == PCI_DEVICE_ID_PHILIPS_SAA7133 ||
		 id == PCI_DEVICE_ID_PHILIPS_SAA7135)
		mute_input_7133(dev);
}
/*
 * Select a new audio input: remember it, reprogram the chip-specific
 * input routing/mute logic, and refresh the I2S output path.
 */
void saa7134_tvaudio_setinput(struct saa7134_dev *dev,
			      struct saa7134_input *in)
{
	unsigned short id = dev->pci->device;

	dev->input = in;
	if (id == PCI_DEVICE_ID_PHILIPS_SAA7130 ||
	    id == PCI_DEVICE_ID_PHILIPS_SAA7134)
		mute_input_7134(dev);
	else if (id == PCI_DEVICE_ID_PHILIPS_SAA7133 ||
		 id == PCI_DEVICE_ID_PHILIPS_SAA7135)
		mute_input_7133(dev);
	saa7134_enable_i2s(dev);
}
/*
 * Set the 5-bit output level on the saa7134's channel/NICAM level
 * registers.  Other chip variants have no such registers; the call is
 * a no-op for them.
 */
void saa7134_tvaudio_setvolume(struct saa7134_dev *dev, int level)
{
	int vol = level & 0x1f;

	if (dev->pci->device != PCI_DEVICE_ID_PHILIPS_SAA7134)
		return;
	saa_writeb(SAA7134_CHANNEL1_LEVEL, vol);
	saa_writeb(SAA7134_CHANNEL2_LEVEL, vol);
	saa_writeb(SAA7134_NICAM_LEVEL_ADJUST, vol);
}
/*
 * Report the currently received audio sub-channels as a
 * V4L2_TUNER_SUB_* mask, dispatching to the chip-specific detector.
 * Falls back to mono if no decoder state is available.
 */
int saa7134_tvaudio_getstereo(struct saa7134_dev *dev)
{
	int rx = V4L2_TUNER_SUB_MONO;

	switch (dev->pci->device) {
	case PCI_DEVICE_ID_PHILIPS_SAA7133:
	case PCI_DEVICE_ID_PHILIPS_SAA7135:
		rx = getstereo_7133(dev);
		break;
	case PCI_DEVICE_ID_PHILIPS_SAA7134:
		if (dev->tvaudio)
			rx = tvaudio_getstereo(dev, dev->tvaudio);
		break;
	}
	return rx;
}
/*
 * One-time audio block initialization: program the board-specific
 * audio master clock (optionally overridden by the
 * audio_clock_override module parameter) and, on saa7134, arm the
 * audio PLL and NICAM error thresholds.
 */
void saa7134_tvaudio_init(struct saa7134_dev *dev)
{
	int clock = saa7134_boards[dev->board].audio_clock;

	if (UNSET != audio_clock_override)
		clock = audio_clock_override;

	switch (dev->pci->device) {
	case PCI_DEVICE_ID_PHILIPS_SAA7134:
		/* init all audio registers */
		saa_writeb(SAA7134_AUDIO_PLL_CTRL, 0x00);
		/* brief pause after dropping the PLL before reprogramming */
		if (need_resched())
			schedule();
		else
			udelay(10);
		/* 24-bit clock value, least significant byte first */
		saa_writeb(SAA7134_AUDIO_CLOCK0, clock & 0xff);
		saa_writeb(SAA7134_AUDIO_CLOCK1, (clock >> 8) & 0xff);
		saa_writeb(SAA7134_AUDIO_CLOCK2, (clock >> 16) & 0xff);
		/* frame locked audio is mandatory for NICAM */
		saa_writeb(SAA7134_AUDIO_PLL_CTRL, 0x01);
		saa_writeb(SAA7134_NICAM_ERROR_LOW, 0x14);
		saa_writeb(SAA7134_NICAM_ERROR_HIGH, 0x50);
		break;
	case PCI_DEVICE_ID_PHILIPS_SAA7133:
	case PCI_DEVICE_ID_PHILIPS_SAA7135:
		/* audio master clock, then two DSP registers
		 * (semantics per saa7133 datasheet - NOTE(review): verify) */
		saa_writel(0x598 >> 2, clock);
		saa_dsp_writel(dev, 0x474 >> 2, 0x00);
		saa_dsp_writel(dev, 0x450 >> 2, 0x00);
	}
}
/*
 * Second-stage audio init: pick the chip-appropriate decoding thread,
 * initialize the audio hardware and start the thread.  Always returns
 * 0 - a failed thread start degrades to "no automatic audio scan"
 * rather than failing device setup.
 *
 * Fix for the old "XXX: missing error handling" note: kthread_run()
 * returns an ERR_PTR on failure, which used to be left in
 * dev->thread.thread and could later be passed to wake_up_process()
 * or kthread_stop().  Reset the pointer to NULL so the rest of the
 * driver correctly treats the thread as absent.
 */
int saa7134_tvaudio_init2(struct saa7134_dev *dev)
{
	int (*my_thread)(void *data) = NULL;

	switch (dev->pci->device) {
	case PCI_DEVICE_ID_PHILIPS_SAA7134:
		my_thread = tvaudio_thread;
		break;
	case PCI_DEVICE_ID_PHILIPS_SAA7133:
	case PCI_DEVICE_ID_PHILIPS_SAA7135:
		my_thread = tvaudio_thread_ddep;
		break;
	}

	dev->thread.thread = NULL;
	dev->thread.scan1 = dev->thread.scan2 = 0;
	if (my_thread) {
		saa7134_tvaudio_init(dev);
		/* start tvaudio thread */
		dev->thread.thread = kthread_run(my_thread, dev, "%s", dev->name);
		if (IS_ERR(dev->thread.thread)) {
			dev->thread.thread = NULL;
			printk(KERN_WARNING "%s: kernel_thread() failed\n",
			       dev->name);
		}
	}

	saa7134_enable_i2s(dev);
	return 0;
}
/* Device close hook: re-arm automute; nothing else needs undoing.
 * Always returns 0. */
int saa7134_tvaudio_close(struct saa7134_dev *dev)
{
	dev->automute = 1;
	return 0;
}
/* Tear down the audio decoding thread (if any is still running) and
 * route the analog audio output back to LINE1.  Always returns 0. */
int saa7134_tvaudio_fini(struct saa7134_dev *dev)
{
	struct task_struct *tsk = dev->thread.thread;

	if (tsk && !dev->thread.stopped)
		kthread_stop(tsk);
	saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, 0x00); /* LINE1 */
	return 0;
}
/*
 * Request a new audio carrier/standard scan.  When the sound IF is
 * routed in and a decoding thread exists, bump the scan generation
 * and wake the thread; otherwise just unmute.  Always returns 0.
 */
int saa7134_tvaudio_do_scan(struct saa7134_dev *dev)
{
	if (dev->input->amux == TV && dev->thread.thread) {
		dev->thread.mode = UNSET;
		dev->thread.scan2++;
		if (!dev->insuspend && !dev->thread.stopped)
			wake_up_process(dev->thread.thread);
		return 0;
	}
	if (dev->input->amux != TV)
		dprintk("sound IF not in use, skipping scan\n");
	dev->automute = 0;
	saa7134_tvaudio_setmute(dev);
	return 0;
}
EXPORT_SYMBOL(saa_dsp_writel);
EXPORT_SYMBOL(saa7134_tvaudio_setmute);
/* ----------------------------------------------------------- */
/*
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
fell978/kernel_huawei | scripts/dtc/libfdt/fdt_ro.c | 7285 | 11555 | /*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
/*
 * Compare the name of the node at @offset against the first @len
 * characters of @s.  A match is either exact, or @s may omit the
 * "@unit-address" suffix of the node name.  Returns 1 on match,
 * 0 otherwise (including when the blob is too short to compare).
 */
static int _fdt_nodename_eq(const void *fdt, int offset,
			    const char *s, int len)
{
	const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1);

	if (!p)
		return 0; /* short match */
	if (memcmp(p, s, len) != 0)
		return 0;
	if (p[len] == '\0')
		return 1;	/* exact name match */
	if (p[len] == '@' && !memchr(s, '@', len))
		return 1;	/* s omits the unit address */
	return 0;
}
/* Return a pointer to the string at @stroffset in the blob's
 * strings block. */
const char *fdt_string(const void *fdt, int stroffset)
{
	const char *strtab = (const char *)fdt + fdt_off_dt_strings(fdt);

	return strtab + stroffset;
}
int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
{
FDT_CHECK_HEADER(fdt);
*address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address);
*size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size);
return 0;
}
/* Count the memory reservation entries; the table is terminated by an
 * entry whose size field is zero. */
int fdt_num_mem_rsv(const void *fdt)
{
	int n;

	for (n = 0; fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size) != 0; n++)
		;
	return n;
}
/*
 * Find the direct subnode of the node at @offset whose name matches
 * the first @namelen characters of @name (the "@unit-address" suffix
 * may be omitted from @name, see _fdt_nodename_eq()).
 *
 * Returns the subnode's offset, -FDT_ERR_NOTFOUND, or another
 * negative libfdt error code.
 */
int fdt_subnode_offset_namelen(const void *fdt, int offset,
			       const char *name, int namelen)
{
	int depth;

	FDT_CHECK_HEADER(fdt);

	/* walk the subtree below @offset; depth 1 == direct child */
	for (depth = 0, offset = fdt_next_node(fdt, offset, &depth);
	     (offset >= 0) && (depth > 0);
	     offset = fdt_next_node(fdt, offset, &depth)) {
		if (depth < 0)
			return -FDT_ERR_NOTFOUND;
		else if ((depth == 1)
			 && _fdt_nodename_eq(fdt, offset, name, namelen))
			return offset;
	}

	if (offset < 0)
		return offset; /* error */
	else
		return -FDT_ERR_NOTFOUND;
}
/* Convenience wrapper: look up a subnode by its full NUL-terminated
 * name. */
int fdt_subnode_offset(const void *fdt, int parentoffset,
		       const char *name)
{
	return fdt_subnode_offset_namelen(fdt, parentoffset, name,
					  strlen(name));
}
/*
 * Translate an absolute path ("/a/b/c") into the offset of the named
 * node by descending one component at a time from the root.
 *
 * Returns the node offset, -FDT_ERR_BADPATH for a relative path, or
 * the error from the failing component lookup.
 */
int fdt_path_offset(const void *fdt, const char *path)
{
	const char *end = path + strlen(path);
	const char *p = path;
	int offset = 0;	/* start at the root node */

	FDT_CHECK_HEADER(fdt);

	if (*path != '/')
		return -FDT_ERR_BADPATH;

	while (*p) {
		const char *q;

		/* skip (repeated) path separators */
		while (*p == '/')
			p++;
		if (! *p)
			return offset;	/* trailing '/': current node */
		q = strchr(p, '/');
		if (! q)
			q = end;	/* last component */

		offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
		if (offset < 0)
			return offset;

		p = q;
	}

	return offset;
}
/*
 * Return the name of the node at @nodeoffset, optionally storing its
 * length via @len.  On failure returns NULL and stores the negative
 * libfdt error code in *len (when @len is non-NULL).
 */
const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
{
	const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset);
	int err;

	err = fdt_check_header(fdt);
	if (err != 0)
		goto fail;
	err = _fdt_check_node_offset(fdt, nodeoffset);
	if (err < 0)
		goto fail;

	if (len)
		*len = strlen(nh->name);
	return nh->name;

 fail:
	if (len)
		*len = err;
	return NULL;
}
/*
 * Find the property called @name of the node at @nodeoffset by
 * walking the structure-block tags until the node ends or a subnode
 * begins.
 *
 * Returns a pointer into the blob (and stores the property length via
 * @lenp when non-NULL), or NULL with a negative libfdt error code
 * stored in *lenp on failure.
 */
const struct fdt_property *fdt_get_property(const void *fdt,
					    int nodeoffset,
					    const char *name, int *lenp)
{
	uint32_t tag;
	const struct fdt_property *prop;
	int namestroff;
	int offset, nextoffset;
	int err;

	if (((err = fdt_check_header(fdt)) != 0)
	    || ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0))
		goto fail;

	/* on success _fdt_check_node_offset() returned the offset of the
	 * first tag inside the node */
	nextoffset = err;
	do {
		offset = nextoffset;
		tag = fdt_next_tag(fdt, offset, &nextoffset);
		switch (tag) {
		case FDT_END:
			/* ran off the end of the structure block */
			err = -FDT_ERR_TRUNCATED;
			goto fail;
		case FDT_BEGIN_NODE:
		case FDT_END_NODE:
		case FDT_NOP:
			break;
		case FDT_PROP:
			err = -FDT_ERR_BADSTRUCTURE;
			prop = fdt_offset_ptr(fdt, offset, sizeof(*prop));
			if (! prop)
				goto fail;
			namestroff = fdt32_to_cpu(prop->nameoff);
			if (strcmp(fdt_string(fdt, namestroff), name) == 0) {
				/* Found it! */
				int len = fdt32_to_cpu(prop->len);
				/* re-check that header + value fit in the blob */
				prop = fdt_offset_ptr(fdt, offset,
						      sizeof(*prop)+len);
				if (! prop)
					goto fail;
				if (lenp)
					*lenp = len;
				return prop;
			}
			break;
		default:
			err = -FDT_ERR_BADSTRUCTURE;
			goto fail;
		}
	} while ((tag != FDT_BEGIN_NODE) && (tag != FDT_END_NODE));

	err = -FDT_ERR_NOTFOUND;
 fail:
	if (lenp)
		*lenp = err;
	return NULL;
}
/*
 * Convenience wrapper around fdt_get_property() that returns a
 * pointer to the property *value*, or NULL on failure (with the
 * error code stored via @lenp, as fdt_get_property() does).
 */
const void *fdt_getprop(const void *fdt, int nodeoffset,
			const char *name, int *lenp)
{
	const struct fdt_property *prop;

	prop = fdt_get_property(fdt, nodeoffset, name, lenp);
	return prop ? prop->data : NULL;
}
uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
{
const uint32_t *php;
int len;
php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
if (!php || (len != sizeof(*php)))
return 0;
return fdt32_to_cpu(*php);
}
/*
 * Reconstruct the full path of the node at @nodeoffset into @buf (at
 * most @buflen bytes, NUL-terminated) by walking the tree from the
 * root, appending components while descending and stripping them
 * while ascending.
 *
 * Returns the path length on success, -FDT_ERR_NOSPACE when @buf is
 * too small, -FDT_ERR_BADOFFSET for a bad @nodeoffset, or another
 * negative libfdt error.
 */
int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
{
	int pdepth = 0, p = 0;	/* depth and byte count currently in buf */
	int offset, depth, namelen;
	const char *name;

	FDT_CHECK_HEADER(fdt);

	if (buflen < 2)
		return -FDT_ERR_NOSPACE;

	for (offset = 0, depth = 0;
	     (offset >= 0) && (offset <= nodeoffset);
	     offset = fdt_next_node(fdt, offset, &depth)) {
		if (pdepth < depth)
			continue; /* overflowed buffer */

		/* left a subtree: drop components down to current depth */
		while (pdepth > depth) {
			do {
				p--;
			} while (buf[p-1] != '/');
			pdepth--;
		}

		name = fdt_get_name(fdt, offset, &namelen);
		if (!name)
			return namelen;	/* error code from fdt_get_name() */
		if ((p + namelen + 1) <= buflen) {
			memcpy(buf + p, name, namelen);
			p += namelen;
			buf[p++] = '/';
			pdepth++;
		}

		if (offset == nodeoffset) {
			/* the path only counts if it fully fit the buffer */
			if (pdepth < (depth + 1))
				return -FDT_ERR_NOSPACE;
			if (p > 1) /* special case so that root path is "/", not "" */
				p--;
			buf[p] = '\0';
			return p;
		}
	}

	if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
		return -FDT_ERR_BADOFFSET;
	else if (offset == -FDT_ERR_BADOFFSET)
		return -FDT_ERR_BADSTRUCTURE;

	return offset; /* error from fdt_next_node() */
}
/*
 * Find the ancestor of the node at @nodeoffset that sits at tree
 * depth @supernodedepth (0 == root).  Optionally stores the node's
 * own depth via @nodedepth.
 *
 * Returns the ancestor's offset, -FDT_ERR_NOTFOUND when
 * @supernodedepth is negative or deeper than the node itself,
 * -FDT_ERR_BADOFFSET for a bad @nodeoffset, or another negative
 * libfdt error.
 */
int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
				 int supernodedepth, int *nodedepth)
{
	int offset, depth;
	int supernodeoffset = -FDT_ERR_INTERNAL;

	FDT_CHECK_HEADER(fdt);

	if (supernodedepth < 0)
		return -FDT_ERR_NOTFOUND;

	/* walk from the root towards @nodeoffset, remembering the last
	 * node seen at the requested depth (an ancestor of the target) */
	for (offset = 0, depth = 0;
	     (offset >= 0) && (offset <= nodeoffset);
	     offset = fdt_next_node(fdt, offset, &depth)) {
		if (depth == supernodedepth)
			supernodeoffset = offset;

		if (offset == nodeoffset) {
			if (nodedepth)
				*nodedepth = depth;

			if (supernodedepth > depth)
				return -FDT_ERR_NOTFOUND;
			else
				return supernodeoffset;
		}
	}

	if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
		return -FDT_ERR_BADOFFSET;
	else if (offset == -FDT_ERR_BADOFFSET)
		return -FDT_ERR_BADSTRUCTURE;

	return offset; /* error from fdt_next_node() */
}
/*
 * Return the tree depth of the node at @nodeoffset (root == 0), or a
 * negative libfdt error.  Implemented via a root lookup, which must
 * return offset 0 on success - anything else is an internal error.
 */
int fdt_node_depth(const void *fdt, int nodeoffset)
{
	int nodedepth;
	int err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0,
					       &nodedepth);

	if (err < 0)
		return err;
	if (err != 0)
		return -FDT_ERR_INTERNAL;
	return nodedepth;
}
/* Return the offset of the parent of the node at @nodeoffset, or a
 * negative libfdt error (including for the root node itself). */
int fdt_parent_offset(const void *fdt, int nodeoffset)
{
	int depth = fdt_node_depth(fdt, nodeoffset);

	if (depth < 0)
		return depth;
	return fdt_supernode_atdepth_offset(fdt, nodeoffset,
					    depth - 1, NULL);
}
/*
 * Find the next node after @startoffset carrying a property called
 * @propname whose value is exactly @proplen bytes equal to @propval.
 * Returns the node's offset, or the (negative) error from
 * fdt_next_node() when the tree is exhausted.
 *
 * Note: this is brute force - each candidate node's properties are
 * rescanned by fdt_getprop(); simple rather than fast.
 */
int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
				  const char *propname,
				  const void *propval, int proplen)
{
	int offset;

	FDT_CHECK_HEADER(fdt);

	offset = fdt_next_node(fdt, startoffset, NULL);
	while (offset >= 0) {
		int len;
		const void *val = fdt_getprop(fdt, offset, propname, &len);

		if (val != NULL && len == proplen &&
		    memcmp(val, propval, len) == 0)
			return offset;
		offset = fdt_next_node(fdt, offset, NULL);
	}
	return offset; /* error code from fdt_next_node() */
}
/*
 * Find the node whose "linux,phandle" property equals @phandle.
 * 0 and ~0 are rejected up front as invalid phandle values.
 */
int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
{
	uint32_t be_phandle;

	if (phandle == 0 || phandle == (uint32_t)-1)
		return -FDT_ERR_BADPHANDLE;
	be_phandle = cpu_to_fdt32(phandle);
	return fdt_node_offset_by_prop_value(fdt, -1, "linux,phandle",
					     &be_phandle,
					     sizeof(be_phandle));
}
/*
 * Test whether @str occurs as one of the NUL-separated entries of the
 * @listlen-byte string list @strlist (the devicetree "compatible"
 * property format).  Returns 1 if found, 0 otherwise (also for a
 * malformed, unterminated list).
 */
static int _stringlist_contains(const char *strlist, int listlen, const char *str)
{
	int len = strlen(str);

	while (listlen >= len) {
		/* compare including the terminating NUL for an exact match */
		if (memcmp(str, strlist, len+1) == 0)
			return 1;
		/* advance past the current entry */
		const char *sep = memchr(strlist, '\0', listlen);
		if (!sep)
			return 0; /* malformed strlist.. */
		listlen -= (sep - strlist) + 1;
		strlist = sep + 1;
	}
	return 0;
}
/*
 * Check whether the node at @nodeoffset lists @compatible in its
 * "compatible" property.  Returns 0 on match, 1 on mismatch, or the
 * negative error from the property lookup.
 */
int fdt_node_check_compatible(const void *fdt, int nodeoffset,
			      const char *compatible)
{
	int len;
	const void *prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);

	if (!prop)
		return len; /* propagate the lookup error code */
	return _stringlist_contains(prop, len, compatible) ? 0 : 1;
}
/*
 * Find the next node after @startoffset whose "compatible" property
 * lists @compatible.  Returns the node's offset, a negative error
 * from the compatibility check, or the (negative) error from
 * fdt_next_node() when the tree is exhausted.
 *
 * Note: brute force - each node's properties are rescanned by
 * fdt_node_check_compatible(); simple rather than fast.
 */
int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
				  const char *compatible)
{
	int offset;

	FDT_CHECK_HEADER(fdt);

	for (offset = fdt_next_node(fdt, startoffset, NULL);
	     offset >= 0;
	     offset = fdt_next_node(fdt, offset, NULL)) {
		int err = fdt_node_check_compatible(fdt, offset, compatible);

		if (err == 0)
			return offset;
		if (err < 0 && err != -FDT_ERR_NOTFOUND)
			return err;
	}
	return offset; /* error from fdt_next_node() */
}
| gpl-2.0 |
SlimRoms/kernel_motorola_msm8226 | arch/powerpc/kernel/udbg_16550.c | 9589 | 7570 | /*
* udbg for NS16550 compatible serial ports
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/reg_a2.h>
/* Raw byte accessors implemented elsewhere for real-mode / special
 * address-space MMIO, used by the platform-specific paths below. */
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
extern u8 real_205_readb(volatile u8 __iomem *addr);
extern void real_205_writeb(u8 data, volatile u8 __iomem *addr);

/* Register map of an NS16550-compatible UART: eight byte-wide
 * registers at consecutive offsets. */
struct NS16550 {
	/* this struct must be packed */
	unsigned char rbr; /* 0 */ /* receive buffer (read side) */
	unsigned char ier; /* 1 */ /* interrupt enable */
	unsigned char fcr; /* 2 */ /* FIFO control (write side) */
	unsigned char lcr; /* 3 */ /* line control */
	unsigned char mcr; /* 4 */ /* modem control */
	unsigned char lsr; /* 5 */ /* line status */
	unsigned char msr; /* 6 */ /* modem status */
	unsigned char scr; /* 7 */ /* scratch */
};

/* Aliases for registers sharing an offset (direction- or
 * LCR_DLAB-dependent). */
#define thr rbr		/* transmit holding = write side of offset 0 */
#define iir fcr		/* interrupt ident = read side of offset 2 */
#define dll rbr		/* divisor latch low, with LCR_DLAB set */
#define dlm ier		/* divisor latch high, with LCR_DLAB set */
#define dlab lcr

/* Line status register bits. */
#define LSR_DR 0x01 /* Data ready */
#define LSR_OE 0x02 /* Overrun */
#define LSR_PE 0x04 /* Parity error */
#define LSR_FE 0x08 /* Framing error */
#define LSR_BI 0x10 /* Break */
#define LSR_THRE 0x20 /* Xmit holding register empty */
#define LSR_TEMT 0x40 /* Xmitter empty */
#define LSR_ERR 0x80 /* Error */

#define LCR_DLAB 0x80	/* divisor latch access bit */

/* UART used by the udbg callbacks; NULL until an init routine runs. */
static struct NS16550 __iomem *udbg_comport;
/* Busy-wait until the transmit holding register has drained. */
static void udbg_550_flush(void)
{
	if (!udbg_comport)
		return;
	while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0)
		; /* wait for idle */
}
/* Emit one character, translating '\n' into CR+LF. */
static void udbg_550_putc(char c)
{
	if (!udbg_comport)
		return;
	if (c == '\n')
		udbg_550_putc('\r');
	udbg_550_flush();
	out_8(&udbg_comport->thr, c);
}
/* Non-blocking read: return the next received byte, or -1 when no
 * data is ready (or no UART is configured). */
static int udbg_550_getc_poll(void)
{
	if (udbg_comport && (in_8(&udbg_comport->lsr) & LSR_DR) != 0)
		return in_8(&udbg_comport->rbr);
	return -1;
}
/* Blocking read: spin until a byte arrives, then return it.
 * Returns -1 when no UART is configured. */
static int udbg_550_getc(void)
{
	if (!udbg_comport)
		return -1;
	while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0)
		; /* wait for char */
	return in_8(&udbg_comport->rbr);
}
/*
 * Program an NS16550-compatible UART at @comport for 8N1 operation at
 * @speed baud given input @clock, then hook the udbg console
 * callbacks up to it.  @speed defaults to 9600, @clock to the
 * conventional 1.8432 MHz.
 */
void udbg_init_uart(void __iomem *comport, unsigned int speed,
		    unsigned int clock)
{
	unsigned int dll, base_bauds;

	if (clock == 0)
		clock = 1843200;
	if (speed == 0)
		speed = 9600;

	base_bauds = clock / 16;
	dll = base_bauds / speed;	/* divisor latch value */

	if (comport) {
		udbg_comport = (struct NS16550 __iomem *)comport;
		out_8(&udbg_comport->lcr, 0x00);
		/* IER toggled 0xff -> 0x00: NOTE(review): presumably
		 * flushes/disables interrupt state - confirm */
		out_8(&udbg_comport->ier, 0xff);
		out_8(&udbg_comport->ier, 0x00);
		/* open the divisor latches and program the baud divisor */
		out_8(&udbg_comport->lcr, LCR_DLAB);
		out_8(&udbg_comport->dll, dll & 0xff);
		out_8(&udbg_comport->dlm, dll >> 8);
		/* 8 data, 1 stop, no parity */
		out_8(&udbg_comport->lcr, 0x03);
		/* RTS/DTR */
		out_8(&udbg_comport->mcr, 0x03);
		/* Clear & enable FIFOs */
		out_8(&udbg_comport->fcr ,0x07);
		udbg_putc = udbg_550_putc;
		udbg_flush = udbg_550_flush;
		udbg_getc = udbg_550_getc;
		udbg_getc_poll = udbg_550_getc_poll;
	}
}
/*
 * Infer the baud rate a UART was already programmed for by reading
 * back its divisor latches (and the prescaler flag in the MCR), given
 * the input @clock.  The LCR is saved and restored around the latch
 * access.  Falls back to 9600 for implausible results.
 *
 * Fix: an unprogrammed UART can read back a divisor of 0, which
 * previously caused a divide-by-zero; that case now also falls back
 * to the 9600 default.
 */
unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
{
	unsigned int dll, dlm, divisor, prescaler, speed;
	u8 old_lcr;
	struct NS16550 __iomem *port = comport;

	old_lcr = in_8(&port->lcr);

	/* select divisor latch registers. */
	out_8(&port->lcr, LCR_DLAB);

	/* now, read the divisor */
	dll = in_8(&port->dll);
	dlm = in_8(&port->dlm);
	divisor = dlm << 8 | dll;

	/* check prescaling */
	if (in_8(&port->mcr) & 0x80)
		prescaler = 4;
	else
		prescaler = 1;

	/* restore the LCR */
	out_8(&port->lcr, old_lcr);

	/* a zero divisor would divide by zero below */
	if (divisor == 0)
		return 9600;

	/* calculate speed */
	speed = (clock / prescaler) / (divisor * 16);

	/* sanity check */
	if (speed > (clock / 16))
		speed = 9600;

	return speed;
}
#ifdef CONFIG_PPC_MAPLE
/* Maple board early-debug UART, accessed through the real-mode MMIO
 * helpers (output only). */
void udbg_maple_real_flush(void)
{
	if (udbg_comport) {
		/* spin until the transmit holding register is empty */
		while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
			/* wait for idle */;
	}
}

void udbg_maple_real_putc(char c)
{
	if (udbg_comport) {
		if (c == '\n')
			udbg_maple_real_putc('\r');	/* LF -> CRLF */
		udbg_maple_real_flush();
		real_writeb(c, &udbg_comport->thr); eieio();
	}
}

void __init udbg_init_maple_realmode(void)
{
	/* fixed UART address on Maple boards */
	udbg_comport = (struct NS16550 __iomem *)0xf40003f8;

	udbg_putc = udbg_maple_real_putc;
	udbg_flush = udbg_maple_real_flush;
	udbg_getc = NULL;		/* output only */
	udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC_MAPLE */
#ifdef CONFIG_PPC_PASEMI
/* PA Semi early-debug UART, accessed through the PWRficient ("205")
 * real-mode MMIO helpers (output only). */
void udbg_pas_real_flush(void)
{
	if (udbg_comport) {
		/* spin until the transmit holding register is empty */
		while ((real_205_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
			/* wait for idle */;
	}
}

void udbg_pas_real_putc(char c)
{
	if (udbg_comport) {
		if (c == '\n')
			udbg_pas_real_putc('\r');	/* LF -> CRLF */
		udbg_pas_real_flush();
		real_205_writeb(c, &udbg_comport->thr); eieio();
	}
}

void udbg_init_pas_realmode(void)
{
	/* fixed UART address on PA Semi platforms */
	udbg_comport = (struct NS16550 __iomem *)0xfcff03f8UL;

	udbg_putc = udbg_pas_real_putc;
	udbg_flush = udbg_pas_real_flush;
	udbg_getc = NULL;		/* output only */
	udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC_PASEMI */
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>

/* PPC 44x early-debug UART, accessed through the address-space-1
 * helpers (as1_readb/as1_writeb). */
static void udbg_44x_as1_flush(void)
{
	if (udbg_comport) {
		/* spin until the transmit holding register is empty */
		while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
			/* wait for idle */;
	}
}

static void udbg_44x_as1_putc(char c)
{
	if (udbg_comport) {
		if (c == '\n')
			udbg_44x_as1_putc('\r');	/* LF -> CRLF */
		udbg_44x_as1_flush();
		as1_writeb(c, &udbg_comport->thr); eieio();
	}
}

static int udbg_44x_as1_getc(void)
{
	if (udbg_comport) {
		while ((as1_readb(&udbg_comport->lsr) & LSR_DR) == 0)
			; /* wait for char */
		return as1_readb(&udbg_comport->rbr);
	}
	return -1;
}

void __init udbg_init_44x_as1(void)
{
	udbg_comport =
		(struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;

	udbg_putc = udbg_44x_as1_putc;
	udbg_flush = udbg_44x_as1_flush;
	udbg_getc = udbg_44x_as1_getc;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
#ifdef CONFIG_PPC_EARLY_DEBUG_40x
static void udbg_40x_real_flush(void)
{
if (udbg_comport) {
while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
/* wait for idle */;
}
}
static void udbg_40x_real_putc(char c)
{
if (udbg_comport) {
if (c == '\n')
udbg_40x_real_putc('\r');
udbg_40x_real_flush();
real_writeb(c, &udbg_comport->thr); eieio();
}
}
static int udbg_40x_real_getc(void)
{
if (udbg_comport) {
while ((real_readb(&udbg_comport->lsr) & LSR_DR) == 0)
; /* wait for char */
return real_readb(&udbg_comport->rbr);
}
return -1;
}
/*
 * Hook the udbg callbacks to the 40x early-debug UART at the configured
 * physical address.  Polled getc is not provided (udbg_getc_poll = NULL).
 */
void __init udbg_init_40x_realmode(void)
{
	udbg_comport = (struct NS16550 __iomem *)
		CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR;

	udbg_putc = udbg_40x_real_putc;
	udbg_flush = udbg_40x_real_flush;
	udbg_getc = udbg_40x_real_getc;
	udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
/* Spin until the WSP UART transmit holding register is empty. */
static void udbg_wsp_flush(void)
{
	if (!udbg_comport)
		return;
	while (!(readb(&udbg_comport->lsr) & LSR_THRE))
		; /* wait for THR empty */
}
/* Write one character to the WSP UART, expanding \n to \r\n. */
static void udbg_wsp_putc(char c)
{
	if (!udbg_comport)
		return;
	if (c == '\n')
		udbg_wsp_putc('\r');
	udbg_wsp_flush();
	writeb(c, &udbg_comport->thr);
	eieio();
}
/* Blocking read of one character from the WSP UART; -1 if no port. */
static int udbg_wsp_getc(void)
{
	if (!udbg_comport)
		return -1;
	while (!(readb(&udbg_comport->lsr) & LSR_DR))
		; /* no data ready yet */
	return readb(&udbg_comport->rbr);
}
/* Non-blocking read from the WSP UART: the character, or -1 if none ready. */
static int udbg_wsp_getc_poll(void)
{
	if (udbg_comport && (readb(&udbg_comport->lsr) & LSR_DR))
		return readb(&udbg_comport->rbr);
	return -1;
}
/*
 * Hook the udbg callbacks to the WSP UART and program it for
 * 57600 baud from a 50 MHz reference clock.
 */
void __init udbg_init_wsp(void)
{
	udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT;

	udbg_init_uart(udbg_comport, 57600, 50000000);

	udbg_putc = udbg_wsp_putc;
	udbg_flush = udbg_wsp_flush;
	udbg_getc = udbg_wsp_getc;
	udbg_getc_poll = udbg_wsp_getc_poll;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
| gpl-2.0 |
LEPT-Development/android_kernel_lge_msm8960 | drivers/video/aty/mach64_gx.c | 14709 | 20884 |
/*
* ATI Mach64 GX Support
*/
#include <linux/delay.h>
#include <linux/fb.h>
#include <asm/io.h>
#include <video/mach64.h>
#include "atyfb.h"
/* Definitions for the ICS 2595 == ATI 18818_1 Clockchip */
#define REF_FREQ_2595 1432 /* 14.33 MHz (exact 14.31818) */
#define REF_DIV_2595 46 /* really 43 on ICS 2595 !!! */
/* ohne Prescaler */
#define MAX_FREQ_2595 15938 /* 159.38 MHz (really 170.486) */
#define MIN_FREQ_2595 8000 /* 80.00 MHz ( 85.565) */
/* mit Prescaler 2, 4, 8 */
#define ABS_MIN_FREQ_2595 1000 /* 10.00 MHz (really 10.697) */
#define N_ADJ_2595 257
#define STOP_BITS_2595 0x1800
#define MIN_N_408 2
#define MIN_N_1703 6
#define MIN_M 2
#define MAX_M 30
#define MIN_N 35
#define MAX_N 255-8
/*
* Support Functions
*/
/*
 * Burn DAC access cycles: one dummy read of DAC_REGS followed by four
 * dummy reads of DAC_REGS + 2, exactly as the hardware expects.
 */
static void aty_dac_waste4(const struct atyfb_par *par)
{
	int i;

	(void) aty_ld_8(DAC_REGS, par);
	for (i = 0; i < 4; i++)
		(void) aty_ld_8(DAC_REGS + 2, par);
}
/* Pulse the clock-chip strobe line after a short settle delay. */
static void aty_StrobeClock(const struct atyfb_par *par)
{
	u8 clock_cntl;

	udelay(26);

	clock_cntl = aty_ld_8(CLOCK_CNTL, par);
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
		 clock_cntl | CLOCK_STROBE, par);
}
/*
* IBM RGB514 DAC and Clock Chip
*/
/*
 * Write one byte to an indirect IBM RGB514 register.  The 16-bit
 * register offset is latched low byte then high byte, the data byte
 * goes through DAC_MASK, and DAC_CNTL frames the indirect access.
 * The write order below is mandated by the chip - do not reorder.
 */
static void aty_st_514(int offset, u8 val, const struct atyfb_par *par)
{
	aty_st_8(DAC_CNTL, 1, par);
	/* right addr byte */
	aty_st_8(DAC_W_INDEX, offset & 0xff, par);
	/* left addr byte */
	aty_st_8(DAC_DATA, (offset >> 8) & 0xff, par);
	aty_st_8(DAC_MASK, val, par);
	aty_st_8(DAC_CNTL, 0, par);
}
/*
 * Program the IBM RGB514 DAC for the requested pixel depth.
 * Only 8 bpp, 16 bpp (555) and 32 bpp (XRGB) are distinguished;
 * anything else falls back to the 8 bpp table entry.
 * Always returns 0.
 */
static int aty_set_dac_514(const struct fb_info *info,
			   const union aty_pll *pll, u32 bpp, u32 accel)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	/* Per-depth register values for the RGB514 (from the old driver). */
	static struct {
		u8 pixel_dly;
		u8 misc2_cntl;
		u8 pixel_rep;
		u8 pixel_cntl_index;
		u8 pixel_cntl_v1;
	} tab[3] = {
		{
		0, 0x41, 0x03, 0x71, 0x45},	/* 8 bpp */
		{
		0, 0x45, 0x04, 0x0c, 0x01},	/* 555 */
		{
		0, 0x45, 0x06, 0x0e, 0x00},	/* XRGB */
	};
	int i;

	switch (bpp) {
	case 8:
	default:
		i = 0;
		break;
	case 16:
		i = 1;
		break;
	case 32:
		i = 2;
		break;
	}
	aty_st_514(0x90, 0x00, par);	/* VRAM Mask Low */
	aty_st_514(0x04, tab[i].pixel_dly, par);	/* Horizontal Sync Control */
	aty_st_514(0x05, 0x00, par);	/* Power Management */
	aty_st_514(0x02, 0x01, par);	/* Misc Clock Control */
	aty_st_514(0x71, tab[i].misc2_cntl, par);	/* Misc Control 2 */
	aty_st_514(0x0a, tab[i].pixel_rep, par);	/* Pixel Format */
	aty_st_514(tab[i].pixel_cntl_index, tab[i].pixel_cntl_v1, par);
	/* Misc Control 2 / 16 BPP Control / 32 BPP Control */
	return 0;
}
/*
 * Pick IBM RGB514 PLL parameters for the requested pixel period by
 * scanning a small fixed table (values inherited from the old driver
 * rather than computed).  Returns 0 on a match, -EINVAL if the period
 * is shorter than anything the table covers.
 */
static int aty_var_to_pll_514(const struct fb_info *info, u32 vclk_per,
			      u32 bpp, union aty_pll *pll)
{
	/*
	 * FIXME: use real calculations instead of using fixed values from the old
	 * driver
	 */
	static struct {
		u32 limit;	/* pixclock rounding limit (arbitrary) */
		u8 m;		/* (df<<6) | vco_div_count */
		u8 n;		/* ref_div_count */
	} clock_tab[7] = {
		{ 8000, (3 << 6) | 20, 9 },	/* 7395 ps / 135.2273 MHz */
		{ 10000, (1 << 6) | 19, 3 },	/* 9977 ps / 100.2273 MHz */
		{ 13000, (1 << 6) | 2, 3 },	/* 12509 ps / 79.9432 MHz */
		{ 14000, (2 << 6) | 8, 7 },	/* 13394 ps / 74.6591 MHz */
		{ 16000, (1 << 6) | 44, 6 },	/* 15378 ps / 65.0284 MHz */
		{ 25000, (1 << 6) | 15, 5 },	/* 17460 ps / 57.2727 MHz */
		{ 50000, (0 << 6) | 53, 7 },	/* 33145 ps / 30.1705 MHz */
	};
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(clock_tab); idx++) {
		if (vclk_per > clock_tab[idx].limit)
			continue;
		pll->ibm514.m = clock_tab[idx].m;
		pll->ibm514.n = clock_tab[idx].n;
		return 0;
	}
	return -EINVAL;
}
/*
 * Convert IBM RGB514 PLL settings back to a pixel period.
 * m packs the divider field (top two bits) and VCO divisor count.
 */
static u32 aty_pll_514_to_var(const struct fb_info *info,
			      const union aty_pll *pll)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u8 df = pll->ibm514.m >> 6;
	u8 vco_div = pll->ibm514.m & 0x3f;
	u8 ref_div = pll->ibm514.n;

	return ((par->ref_clk_per * ref_div) << (3 - df)) / (vco_div + 65);
}
/*
 * Program the IBM RGB514 PLL with the m/n values chosen by
 * aty_var_to_pll_514().  The indirect-register write sequence below
 * follows the chip's documented init order - do not reorder.
 */
static void aty_set_pll_514(const struct fb_info *info,
			    const union aty_pll *pll)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;

	aty_st_514(0x06, 0x02, par);	/* DAC Operation */
	aty_st_514(0x10, 0x01, par);	/* PLL Control 1 */
	aty_st_514(0x70, 0x01, par);	/* Misc Control 1 */
	aty_st_514(0x8f, 0x1f, par);	/* PLL Ref. Divider Input */
	aty_st_514(0x03, 0x00, par);	/* Sync Control */
	aty_st_514(0x05, 0x00, par);	/* Power Management */
	aty_st_514(0x20, pll->ibm514.m, par);	/* F0 / M0 */
	aty_st_514(0x21, pll->ibm514.n, par);	/* F1 / N0 */
}
/* Operation tables exported for boards using the IBM RGB514 DAC/clock. */
const struct aty_dac_ops aty_dac_ibm514 = {
	.set_dac	= aty_set_dac_514,
};

const struct aty_pll_ops aty_pll_ibm514 = {
	.var_to_pll	= aty_var_to_pll_514,
	.pll_to_var	= aty_pll_514_to_var,
	.set_pll	= aty_set_pll_514,
};
/*
* ATI 68860-B DAC
*/
/*
 * Program the ATI 68860-B DAC for the requested depth.  Unaccelerated
 * modes force the generic 0x80/0x61 setting.  The VRAM-size-dependent
 * mask and the RS2/RS3 select bit dance follow the chip's indirect
 * register access protocol; the final BUS_CNTL/DAC_CNTL values are
 * fixed magic from the original driver.  Always returns 0.
 */
static int aty_set_dac_ATI68860_B(const struct fb_info *info,
				  const union aty_pll *pll, u32 bpp,
				  u32 accel)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u32 gModeReg, devSetupRegA, temp, mask;

	gModeReg = 0;
	devSetupRegA = 0;

	switch (bpp) {
	case 8:
		gModeReg = 0x83;
		devSetupRegA =
		    0x60 | 0x00 /*(info->mach64DAC8Bit ? 0x00 : 0x01) */ ;
		break;
	case 15:
		gModeReg = 0xA0;
		devSetupRegA = 0x60;
		break;
	case 16:
		gModeReg = 0xA1;
		devSetupRegA = 0x60;
		break;
	case 24:
		gModeReg = 0xC0;
		devSetupRegA = 0x60;
		break;
	case 32:
		gModeReg = 0xE3;
		devSetupRegA = 0x60;
		break;
	}

	if (!accel) {
		gModeReg = 0x80;
		devSetupRegA = 0x61;
	}

	temp = aty_ld_8(DAC_CNTL, par);
	aty_st_8(DAC_CNTL, (temp & ~DAC_EXT_SEL_RS2) | DAC_EXT_SEL_RS3,
		 par);

	aty_st_8(DAC_REGS + 2, 0x1D, par);
	aty_st_8(DAC_REGS + 3, gModeReg, par);
	aty_st_8(DAC_REGS, 0x02, par);

	temp = aty_ld_8(DAC_CNTL, par);
	aty_st_8(DAC_CNTL, temp | DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3, par);

	/* mask selects the delay appropriate for the installed VRAM size */
	if (info->fix.smem_len < ONE_MB)
		mask = 0x04;
	else if (info->fix.smem_len == ONE_MB)
		mask = 0x08;
	else
		mask = 0x0C;

	/* The following assumes that the BIOS has correctly set R7 of the
	 * Device Setup Register A at boot time.
	 */
#define A860_DELAY_L	0x80

	temp = aty_ld_8(DAC_REGS, par);
	aty_st_8(DAC_REGS, (devSetupRegA | mask) | (temp & A860_DELAY_L),
		 par);
	temp = aty_ld_8(DAC_CNTL, par);
	aty_st_8(DAC_CNTL, (temp & ~(DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3)),
		 par);

	aty_st_le32(BUS_CNTL, 0x890e20f1, par);
	aty_st_le32(DAC_CNTL, 0x47052100, par);
	return 0;
}
/* DAC operations exported for boards with the ATI 68860-B. */
const struct aty_dac_ops aty_dac_ati68860b = {
	.set_dac	= aty_set_dac_ATI68860_B,
};
/*
* AT&T 21C498 DAC
*/
/*
 * Program the AT&T 21C498 DAC for the requested depth.  At 8 bpp above
 * 80 MHz the DAC is put into multiplexed mode.  Returns the mux mode
 * chosen (0 or 1) so the caller can adjust CRTC timing.
 */
static int aty_set_dac_ATT21C498(const struct fb_info *info,
				 const union aty_pll *pll, u32 bpp,
				 u32 accel)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u32 dotClock;
	int muxmode = 0;
	int DACMask = 0;

	/* dot clock in 0.01 MHz units, from the stored pixel period */
	dotClock = 100000000 / pll->ics2595.period_in_ps;

	switch (bpp) {
	case 8:
		if (dotClock > 8000) {
			DACMask = 0x24;
			muxmode = 1;
		} else
			DACMask = 0x04;
		break;
	case 15:
		DACMask = 0x16;
		break;
	case 16:
		DACMask = 0x36;
		break;
	case 24:
		DACMask = 0xE6;
		break;
	case 32:
		DACMask = 0xE6;
		break;
	}

	if (1 /* info->mach64DAC8Bit */ )
		DACMask |= 0x02;

	aty_dac_waste4(par);
	aty_st_8(DAC_REGS + 2, DACMask, par);

	aty_st_le32(BUS_CNTL, 0x890e20f1, par);
	aty_st_le32(DAC_CNTL, 0x00072000, par);
	return muxmode;
}
/* DAC operations exported for boards with the AT&T 21C498. */
const struct aty_dac_ops aty_dac_att21c498 = {
	.set_dac	= aty_set_dac_ATT21C498,
};
/*
* ATI 18818 / ICS 2595 Clock Chip
*/
/*
 * Compute ICS 2595 / ATI 18818 programming bits for the requested pixel
 * period.  Out-of-range frequencies return -EINVAL; frequencies below
 * the VCO minimum are doubled into range and compensated with a 2/4/8
 * post divider.  The result (divider count relative to N_ADJ_2595 plus
 * post-divider select bits and the fixed stop bits) is stashed in
 * pll->ics2595 for aty_set_pll18818().
 */
static int aty_var_to_pll_18818(const struct fb_info *info, u32 vclk_per,
				u32 bpp, union aty_pll *pll)
{
	u32 MHz100;		/* in 0.01 MHz */
	u32 program_bits;
	u32 post_divider;

	/* Calculate the programming word */
	MHz100 = 100000000 / vclk_per;

	program_bits = -1;	/* sentinel: not yet computed */
	post_divider = 1;

	if (MHz100 > MAX_FREQ_2595) {
		MHz100 = MAX_FREQ_2595;
		return -EINVAL;
	} else if (MHz100 < ABS_MIN_FREQ_2595) {
		program_bits = 0;	/* MHz100 = 257 */
		return -EINVAL;
	} else {
		while (MHz100 < MIN_FREQ_2595) {
			MHz100 *= 2;
			post_divider *= 2;
		}
	}
	/* scale by the reference divider/frequency ratio, rounding */
	MHz100 *= 1000;
	MHz100 = (REF_DIV_2595 * MHz100) / REF_FREQ_2595;

	MHz100 += 500;		/* + 0.5 round */
	MHz100 /= 1000;

	if (program_bits == -1) {
		program_bits = MHz100 - N_ADJ_2595;
		switch (post_divider) {
		case 1:
			program_bits |= 0x0600;
			break;
		case 2:
			program_bits |= 0x0400;
			break;
		case 4:
			program_bits |= 0x0200;
			break;
		case 8:
		default:
			break;
		}
	}

	program_bits |= STOP_BITS_2595;

	pll->ics2595.program_bits = program_bits;
	pll->ics2595.locationAddr = 0;
	pll->ics2595.post_divider = post_divider;
	pll->ics2595.period_in_ps = vclk_per;

	return 0;
}
/* Report the pixel period stored when the PLL was computed. */
static u32 aty_pll_18818_to_var(const struct fb_info *info,
				const union aty_pll *pll)
{
	/* default for now */
	return pll->ics2595.period_in_ps;
}
/*
 * Bit-bang a single bit into the ICS 2595 serial interface: place the
 * data bit on bit 2 of CLOCK_CNTL, then clock it in by toggling bit 3
 * low->high with a strobe after each edge.  The exact read-modify-write
 * and strobe sequence is timing-sensitive - do not reorder.
 */
static void aty_ICS2595_put1bit(u8 data, const struct atyfb_par *par)
{
	u8 tmp;

	data &= 0x01;
	tmp = aty_ld_8(CLOCK_CNTL, par);
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
		 (tmp & ~0x04) | (data << 2), par);

	tmp = aty_ld_8(CLOCK_CNTL, par);
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset, (tmp & ~0x08) | (0 << 3),
		 par);

	aty_StrobeClock(par);

	tmp = aty_ld_8(CLOCK_CNTL, par);
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset, (tmp & ~0x08) | (1 << 3),
		 par);

	aty_StrobeClock(par);
	return;
}
/*
 * Serially program the ICS 2595 clock chip with the bits computed by
 * aty_var_to_pll_18818().  The display is blanked (CRTC_EXT_DISP_EN)
 * while the 5 location-address bits and 13 data bits are shifted in
 * LSB-first, then restored.  Delays bracket the sequence to satisfy
 * the chip's timing requirements - do not reorder.
 */
static void aty_set_pll18818(const struct fb_info *info,
			     const union aty_pll *pll)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u32 program_bits;
	u32 locationAddr;

	u32 i;

	u8 old_clock_cntl;
	u8 old_crtc_ext_disp;

	old_clock_cntl = aty_ld_8(CLOCK_CNTL, par);
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 0, par);

	old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
	aty_st_8(CRTC_GEN_CNTL + 3,
		 old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);

	mdelay(15);		/* delay for 50 (15) ms */

	program_bits = pll->ics2595.program_bits;
	locationAddr = pll->ics2595.locationAddr;

	/* Program the clock chip */
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 0, par);	/* Strobe = 0 */
	aty_StrobeClock(par);
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset, 1, par);	/* Strobe = 0 */
	aty_StrobeClock(par);

	aty_ICS2595_put1bit(1, par);	/* Send start bits */
	aty_ICS2595_put1bit(0, par);	/* Start bit */
	aty_ICS2595_put1bit(0, par);	/* Read / ~Write */

	for (i = 0; i < 5; i++) {	/* Location 0..4 */
		aty_ICS2595_put1bit(locationAddr & 1, par);
		locationAddr >>= 1;
	}

	for (i = 0; i < 8 + 1 + 2 + 2; i++) {
		aty_ICS2595_put1bit(program_bits & 1, par);
		program_bits >>= 1;
	}

	mdelay(1);		/* delay for 1 ms */

	(void) aty_ld_8(DAC_REGS, par);	/* Clear DAC Counter */
	aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);

	aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
		 old_clock_cntl | CLOCK_STROBE, par);

	mdelay(50);		/* delay for 50 (15) ms */
	aty_st_8(CLOCK_CNTL + par->clk_wr_offset,
		 ((pll->ics2595.locationAddr & 0x0F) | CLOCK_STROBE), par);

	return;
}
/* PLL operations exported for boards with the ATI 18818-1 / ICS 2595. */
const struct aty_pll_ops aty_pll_ati18818_1 = {
	.var_to_pll	= aty_var_to_pll_18818,
	.pll_to_var	= aty_pll_18818_to_var,
	.set_pll	= aty_set_pll18818,
};
/*
* STG 1703 Clock Chip
*/
/*
 * Compute STG 1703 programming bits for the requested pixel period by
 * exhaustive search: for each candidate N divider the matching M value
 * is derived and the candidate with the smallest frequency remainder
 * wins.  Frequencies below range are shifted up and the shift count is
 * encoded in the high bits of the divider byte.  Always returns 0;
 * the result is left in pll->ics2595 for aty_set_pll_1703().
 * NOTE(review): the signed/unsigned mixing (temp -= (short)...) mirrors
 * the original driver; it is intentional overflow arithmetic.
 */
static int aty_var_to_pll_1703(const struct fb_info *info, u32 vclk_per,
			       u32 bpp, union aty_pll *pll)
{
	u32 mhz100;		/* in 0.01 MHz */
	u32 program_bits;
	/* u32 post_divider; */
	u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
	u32 temp, tempB;
	u16 remainder, preRemainder;
	short divider = 0, tempA;

	/* Calculate the programming word */
	mhz100 = 100000000 / vclk_per;
	mach64MinFreq = MIN_FREQ_2595;
	mach64MaxFreq = MAX_FREQ_2595;
	mach64RefFreq = REF_FREQ_2595;	/* 14.32 MHz */

	/* Calculate program word */
	if (mhz100 == 0)
		program_bits = 0xE0;	/* power-down value */
	else {
		if (mhz100 < mach64MinFreq)
			mhz100 = mach64MinFreq;
		if (mhz100 > mach64MaxFreq)
			mhz100 = mach64MaxFreq;

		divider = 0;
		while (mhz100 < (mach64MinFreq << 3)) {
			mhz100 <<= 1;
			divider += 0x20;	/* post-divider shift count */
		}

		temp = (unsigned int) (mhz100);
		temp = (unsigned int) (temp * (MIN_N_1703 + 2));
		temp -= (short) (mach64RefFreq << 1);

		tempA = MIN_N_1703;
		preRemainder = 0xffff;

		do {
			tempB = temp;
			remainder = tempB % mach64RefFreq;
			tempB = tempB / mach64RefFreq;

			/* keep the best (smallest remainder) candidate */
			if ((tempB & 0xffff) <= 127
			    && (remainder <= preRemainder)) {
				preRemainder = remainder;
				divider &= ~0x1f;
				divider |= tempA;
				divider =
				    (divider & 0x00ff) +
				    ((tempB & 0xff) << 8);
			}

			temp += mhz100;
			tempA++;
		} while (tempA <= (MIN_N_1703 << 1));

		program_bits = divider;
	}

	pll->ics2595.program_bits = program_bits;
	pll->ics2595.locationAddr = 0;
	pll->ics2595.post_divider = divider;	/* fuer nix */
	pll->ics2595.period_in_ps = vclk_per;

	return 0;
}
/* Report the pixel period stored when the PLL was computed. */
static u32 aty_pll_1703_to_var(const struct fb_info *info,
			       const union aty_pll *pll)
{
	/* default for now */
	return pll->ics2595.period_in_ps;
}
/*
 * Program the STG 1703 clock chip through the DAC register window with
 * the bits computed by aty_var_to_pll_1703().  The display is blanked
 * around the write sequence; byte order (address, 0, high, low) is
 * mandated by the chip - do not reorder.
 */
static void aty_set_pll_1703(const struct fb_info *info,
			     const union aty_pll *pll)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u32 program_bits;
	u32 locationAddr;

	char old_crtc_ext_disp;

	old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
	aty_st_8(CRTC_GEN_CNTL + 3,
		 old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);

	program_bits = pll->ics2595.program_bits;
	locationAddr = pll->ics2595.locationAddr;

	/* Program clock */
	aty_dac_waste4(par);

	(void) aty_ld_8(DAC_REGS + 2, par);
	aty_st_8(DAC_REGS + 2, (locationAddr << 1) + 0x20, par);
	aty_st_8(DAC_REGS + 2, 0, par);
	aty_st_8(DAC_REGS + 2, (program_bits & 0xFF00) >> 8, par);
	aty_st_8(DAC_REGS + 2, (program_bits & 0xFF), par);

	(void) aty_ld_8(DAC_REGS, par);	/* Clear DAC Counter */
	aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);

	return;
}
/* PLL operations exported for boards with the STG 1703 clock chip. */
const struct aty_pll_ops aty_pll_stg1703 = {
	.var_to_pll	= aty_var_to_pll_1703,
	.pll_to_var	= aty_pll_1703_to_var,
	.set_pll	= aty_set_pll_1703,
};
/*
* Chrontel 8398 Clock Chip
*/
/*
 * Compute Chrontel 8398 programming bits for the requested pixel period
 * by brute-force search over all (m, n) divider pairs, minimizing the
 * distance between the synthesized frequency and the target (both kept
 * in 8-bit fixed point).  Sub-range frequencies are doubled into range
 * and the doubling count k becomes the post-divider field.  Always
 * returns 0; the result lands in pll->ics2595 for aty_set_pll_8398().
 */
static int aty_var_to_pll_8398(const struct fb_info *info, u32 vclk_per,
			       u32 bpp, union aty_pll *pll)
{
	u32 tempA, tempB, fOut, longMHz100, diff, preDiff;

	u32 mhz100;		/* in 0.01 MHz */
	u32 program_bits;
	/* u32 post_divider; */
	u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
	u16 m, n, k = 0, save_m, save_n, twoToKth;

	/* Calculate the programming word */
	mhz100 = 100000000 / vclk_per;
	mach64MinFreq = MIN_FREQ_2595;
	mach64MaxFreq = MAX_FREQ_2595;
	mach64RefFreq = REF_FREQ_2595;	/* 14.32 MHz */

	save_m = 0;
	save_n = 0;

	/* Calculate program word */
	if (mhz100 == 0)
		program_bits = 0xE0;	/* power-down value */
	else {
		if (mhz100 < mach64MinFreq)
			mhz100 = mach64MinFreq;
		if (mhz100 > mach64MaxFreq)
			mhz100 = mach64MaxFreq;

		longMHz100 = mhz100 * 256 / 100;	/* 8 bit scale this */

		while (mhz100 < (mach64MinFreq << 3)) {
			mhz100 <<= 1;
			k++;
		}

		twoToKth = 1 << k;
		diff = 0;
		preDiff = 0xFFFFFFFF;

		for (m = MIN_M; m <= MAX_M; m++) {
			for (n = MIN_N; n <= MAX_N; n++) {
				tempA = 938356;		/* 14.31818 * 65536 */
				tempA *= (n + 8);	/* 43..256 */
				tempB = twoToKth * 256;
				tempB *= (m + 2);	/* 4..32 */
				fOut = tempA / tempB;	/* 8 bit scale */

				if (longMHz100 > fOut)
					diff = longMHz100 - fOut;
				else
					diff = fOut - longMHz100;

				if (diff < preDiff) {
					save_m = m;
					save_n = n;
					preDiff = diff;
				}
			}
		}

		program_bits = (k << 6) + (save_m) + (save_n << 8);
	}

	pll->ics2595.program_bits = program_bits;
	pll->ics2595.locationAddr = 0;
	pll->ics2595.post_divider = 0;
	pll->ics2595.period_in_ps = vclk_per;

	return 0;
}
/* Report the pixel period stored when the PLL was computed. */
static u32 aty_pll_8398_to_var(const struct fb_info *info,
			       const union aty_pll *pll)
{
	/* default for now */
	return pll->ics2595.period_in_ps;
}
/*
 * Program the Chrontel 8398 clock chip with the bits computed by
 * aty_var_to_pll_8398().  RS2/RS3 select bits in DAC_CNTL open the
 * chip's indirect register window; the display is blanked around the
 * write sequence.  Write order is mandated by the chip - do not reorder.
 */
static void aty_set_pll_8398(const struct fb_info *info,
			     const union aty_pll *pll)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u32 program_bits;
	u32 locationAddr;

	char old_crtc_ext_disp;
	char tmp;

	old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
	aty_st_8(CRTC_GEN_CNTL + 3,
		 old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);

	program_bits = pll->ics2595.program_bits;
	locationAddr = pll->ics2595.locationAddr;

	/* Program clock */
	tmp = aty_ld_8(DAC_CNTL, par);
	aty_st_8(DAC_CNTL, tmp | DAC_EXT_SEL_RS2 | DAC_EXT_SEL_RS3, par);

	aty_st_8(DAC_REGS, locationAddr, par);
	aty_st_8(DAC_REGS + 1, (program_bits & 0xff00) >> 8, par);
	aty_st_8(DAC_REGS + 1, (program_bits & 0xff), par);

	tmp = aty_ld_8(DAC_CNTL, par);
	aty_st_8(DAC_CNTL, (tmp & ~DAC_EXT_SEL_RS2) | DAC_EXT_SEL_RS3,
		 par);

	(void) aty_ld_8(DAC_REGS, par);	/* Clear DAC Counter */
	aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);

	return;
}
/* PLL operations exported for boards with the Chrontel 8398 clock chip. */
const struct aty_pll_ops aty_pll_ch8398 = {
	.var_to_pll	= aty_var_to_pll_8398,
	.pll_to_var	= aty_pll_8398_to_var,
	.set_pll	= aty_set_pll_8398,
};
/*
* AT&T 20C408 Clock Chip
*/
/*
 * Compute AT&T 20C408 programming bits for the requested pixel period.
 * Structure parallels aty_var_to_pll_1703(): shift the target frequency
 * into VCO range (shift count in the divider's high bits, 0x40 steps),
 * then search divider candidates for the smallest remainder.  Always
 * returns 0; the result is left in pll->ics2595 for aty_set_pll_408().
 */
static int aty_var_to_pll_408(const struct fb_info *info, u32 vclk_per,
			      u32 bpp, union aty_pll *pll)
{
	u32 mhz100;		/* in 0.01 MHz */
	u32 program_bits;
	/* u32 post_divider; */
	u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
	u32 temp, tempB;
	u16 remainder, preRemainder;
	short divider = 0, tempA;

	/* Calculate the programming word */
	mhz100 = 100000000 / vclk_per;
	mach64MinFreq = MIN_FREQ_2595;
	mach64MaxFreq = MAX_FREQ_2595;
	mach64RefFreq = REF_FREQ_2595;	/* 14.32 MHz */

	/* Calculate program word */
	if (mhz100 == 0)
		program_bits = 0xFF;	/* power-down value */
	else {
		if (mhz100 < mach64MinFreq)
			mhz100 = mach64MinFreq;
		if (mhz100 > mach64MaxFreq)
			mhz100 = mach64MaxFreq;

		while (mhz100 < (mach64MinFreq << 3)) {
			mhz100 <<= 1;
			divider += 0x40;	/* post-divider shift count */
		}

		temp = (unsigned int) mhz100;
		temp = (unsigned int) (temp * (MIN_N_408 + 2));
		temp -= ((short) (mach64RefFreq << 1));

		tempA = MIN_N_408;
		preRemainder = 0xFFFF;

		do {
			tempB = temp;
			remainder = tempB % mach64RefFreq;
			tempB = tempB / mach64RefFreq;

			/* keep the best (smallest remainder) candidate */
			if (((tempB & 0xFFFF) <= 255)
			    && (remainder <= preRemainder)) {
				preRemainder = remainder;
				divider &= ~0x3f;
				divider |= tempA;
				divider =
				    (divider & 0x00FF) +
				    ((tempB & 0xFF) << 8);
			}

			temp += mhz100;
			tempA++;
		} while (tempA <= 32);

		program_bits = divider;
	}

	pll->ics2595.program_bits = program_bits;
	pll->ics2595.locationAddr = 0;
	pll->ics2595.post_divider = divider;	/* fuer nix */
	pll->ics2595.period_in_ps = vclk_per;

	return 0;
}
/* Report the pixel period stored when the PLL was computed. */
static u32 aty_pll_408_to_var(const struct fb_info *info,
			      const union aty_pll *pll)
{
	/* default for now */
	return pll->ics2595.period_in_ps;
}
/*
 * Program the AT&T 20C408 clock chip with the bits computed by
 * aty_var_to_pll_408().  The chip is unlocked through DAC register
 * index 1, the location-relative registers are written (high byte,
 * low byte, then the fixed 0x77 control value), and the unlock bits
 * are cleared again.  The 400 us delays and exact write order are
 * required by the chip - do not reorder.
 */
static void aty_set_pll_408(const struct fb_info *info,
			    const union aty_pll *pll)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u32 program_bits;
	u32 locationAddr;

	u8 tmpA, tmpB, tmpC;
	char old_crtc_ext_disp;

	old_crtc_ext_disp = aty_ld_8(CRTC_GEN_CNTL + 3, par);
	aty_st_8(CRTC_GEN_CNTL + 3,
		 old_crtc_ext_disp | (CRTC_EXT_DISP_EN >> 24), par);

	program_bits = pll->ics2595.program_bits;
	locationAddr = pll->ics2595.locationAddr;

	/* Program clock */
	aty_dac_waste4(par);
	tmpB = aty_ld_8(DAC_REGS + 2, par) | 1;
	aty_dac_waste4(par);
	aty_st_8(DAC_REGS + 2, tmpB, par);

	tmpA = tmpB;
	tmpC = tmpA;
	tmpA |= 8;
	tmpB = 1;

	aty_st_8(DAC_REGS, tmpB, par);
	aty_st_8(DAC_REGS + 2, tmpA, par);

	udelay(400);		/* delay for 400 us */

	locationAddr = (locationAddr << 2) + 0x40;
	tmpB = locationAddr;
	tmpA = program_bits >> 8;

	aty_st_8(DAC_REGS, tmpB, par);
	aty_st_8(DAC_REGS + 2, tmpA, par);

	tmpB = locationAddr + 1;
	tmpA = (u8) program_bits;

	aty_st_8(DAC_REGS, tmpB, par);
	aty_st_8(DAC_REGS + 2, tmpA, par);

	tmpB = locationAddr + 2;
	tmpA = 0x77;

	aty_st_8(DAC_REGS, tmpB, par);
	aty_st_8(DAC_REGS + 2, tmpA, par);

	udelay(400);		/* delay for 400 us */
	tmpA = tmpC & (~(1 | 8));
	tmpB = 1;

	aty_st_8(DAC_REGS, tmpB, par);
	aty_st_8(DAC_REGS + 2, tmpA, par);

	(void) aty_ld_8(DAC_REGS, par);	/* Clear DAC Counter */
	aty_st_8(CRTC_GEN_CNTL + 3, old_crtc_ext_disp, par);

	return;
}
/* PLL operations exported for boards with the AT&T 20C408 clock chip. */
const struct aty_pll_ops aty_pll_att20c408 = {
	.var_to_pll	= aty_var_to_pll_408,
	.pll_to_var	= aty_pll_408_to_var,
	.set_pll	= aty_set_pll_408,
};
/*
* Unsupported DAC and Clock Chip
*/
/*
 * Fallback for unrecognized DACs: write fixed BUS_CNTL/DAC_CNTL values
 * known to work on most boards.  Always returns 0.
 */
static int aty_set_dac_unsupported(const struct fb_info *info,
				   const union aty_pll *pll, u32 bpp,
				   u32 accel)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;

	aty_st_le32(BUS_CNTL, 0x890e20f1, par);
	aty_st_le32(DAC_CNTL, 0x47052100, par);
	/* new in 2.2.3p1 from Geert. ???????? */
	aty_st_le32(BUS_CNTL, 0x590e10ff, par);
	aty_st_le32(DAC_CNTL, 0x47012100, par);
	return 0;
}
/* No-op placeholder used for every aty_pll_unsupported callback. */
static int dummy(void)
{
	return 0;
}
/* Fallback operation tables for boards with an unrecognized DAC/clock. */
const struct aty_dac_ops aty_dac_unsupported = {
	.set_dac	= aty_set_dac_unsupported,
};

const struct aty_pll_ops aty_pll_unsupported = {
	.var_to_pll	= (void *) dummy,
	.pll_to_var	= (void *) dummy,
	.set_pll	= (void *) dummy,
};
| gpl-2.0 |
nvertigo/AK-OnePone | drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c | 118 | 46966 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "MSM-VPE %s:%d " fmt, __func__, __LINE__
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/msm_ion.h>
#include <linux/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/iommu.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/media-entity.h>
#include <media/msmb_generic_buf_mgr.h>
#include <media/msmb_pproc.h>
#include "msm_vpe.h"
#include "msm_camera_io_util.h"
#define MSM_VPE_IDENT_TO_SESSION_ID(identity) ((identity >> 16) & 0xFFFF)
#define MSM_VPE_IDENT_TO_STREAM_ID(identity) (identity & 0xFFFF)
#define MSM_VPE_DRV_NAME "msm_vpe"
#define MSM_VPE_MAX_BUFF_QUEUE 16
#define CONFIG_MSM_VPE_DBG 0
#if CONFIG_MSM_VPE_DBG
#define VPE_DBG(fmt, args...) pr_err(fmt, ##args)
#else
#define VPE_DBG(fmt, args...) pr_debug(fmt, ##args)
#endif
/*
 * Debug helper: hex-dump `size` bytes starting at `addr`, four 32-bit
 * words per log line, each line prefixed with its address.
 * NOTE(review): the (u32) casts of the pointer truncate on 64-bit
 * builds - presumably fine on this 32-bit msm target, but confirm.
 */
static void vpe_mem_dump(const char * const name, const void * const addr,
			int size)
{
	char line_str[128], *p_str;
	int i;
	u32 *p = (u32 *) addr;
	u32 data;
	VPE_DBG("%s: (%s) %p %d\n", __func__, name, addr, size);
	line_str[0] = '\0';
	p_str = line_str;
	for (i = 0; i < size/4; i++) {
		/* start a new line with the current address every 4 words */
		if (i % 4 == 0) {
			snprintf(p_str, 12, "%08x: ", (u32) p);
			p_str += 10;
		}
		data = *p++;
		snprintf(p_str, 12, "%08x ", data);
		p_str += 9;
		if ((i + 1) % 4 == 0) {
			VPE_DBG("%s\n", line_str);
			line_str[0] = '\0';
			p_str = line_str;
		}
	}
	/* flush any partial final line */
	if (line_str[0] != '\0')
		VPE_DBG("%s\n", line_str);
}
/*
 * 64-bit division wrapper: do_div() divides num by den in place and
 * this helper returns the quotient (the local copy, so the caller's
 * operands are untouched).
 */
static inline long long vpe_do_div(long long num, long long den)
{
	do_div(num, den);
	return num;
}
/*
 * Pop the oldest msm_queue_cmd from a msm_device_queue under its
 * spinlock; evaluates to the command pointer, or 0 if the queue was
 * empty.  `member` names the list_head field inside msm_queue_cmd.
 * (Statement-expression macro; do not pass expressions with side
 * effects as `queue`.)
 */
#define msm_dequeue(queue, member) ({ \
	unsigned long flags; \
	struct msm_device_queue *__q = (queue); \
	struct msm_queue_cmd *qcmd = 0; \
	spin_lock_irqsave(&__q->lock, flags); \
	if (!list_empty(&__q->list)) { \
		__q->len--; \
		qcmd = list_first_entry(&__q->list, \
			struct msm_queue_cmd, \
			member); \
		list_del_init(&qcmd->member); \
	} \
	spin_unlock_irqrestore(&__q->lock, flags); \
	qcmd; \
})
/* Initialize a device queue: empty list, zeroed counters, fresh lock. */
static void msm_queue_init(struct msm_device_queue *queue, const char *name)
{
	queue->name = name;
	queue->len = 0;
	queue->max = 0;
	spin_lock_init(&queue->lock);
	INIT_LIST_HEAD(&queue->list);
	init_waitqueue_head(&queue->wait);
}
/* VPE clocks: core clock at 160 MHz, pclk at its default rate (-1). */
static struct msm_cam_clk_info vpe_clk_info[] = {
	{"vpe_clk", 160000000},
	{"vpe_pclk", -1},
};
static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev);
/*
 * Append `entry` to the device queue under its spinlock, track the
 * high-water mark, and wake any waiter.
 */
static void msm_enqueue(struct msm_device_queue *queue,
			struct list_head *entry)
{
	unsigned long flags;
	spin_lock_irqsave(&queue->lock, flags);
	queue->len++;
	if (queue->len > queue->max) {
		queue->max = queue->len;
		pr_debug("queue %s new max is %d\n", queue->name, queue->max);
	}
	list_add_tail(entry, &queue->list);
	wake_up(&queue->wait);
	VPE_DBG("woke up %s\n", queue->name);
	spin_unlock_irqrestore(&queue->lock, flags);
}
/*
 * Look up the in-use buffer queue slot for a (session, stream) pair.
 * Returns the slot, or NULL (after logging) when no match exists.
 */
static struct msm_vpe_buff_queue_info_t *msm_vpe_get_buff_queue_entry(
	struct vpe_device *vpe_dev, uint32_t session_id, uint32_t stream_id)
{
	uint32_t idx;

	for (idx = 0; idx < vpe_dev->num_buffq; idx++) {
		struct msm_vpe_buff_queue_info_t *slot =
			&vpe_dev->buff_queue[idx];

		if ((slot->used == 1) &&
		    (slot->session_id == session_id) &&
		    (slot->stream_id == stream_id))
			return slot;
	}

	pr_err("error buffer queue entry for sess:%d strm:%d not found\n",
		session_id, stream_id);
	return NULL;
}
/*
 * Find the physical address previously mapped for a buffer index on
 * the native or vb2 list of this queue slot.  Returns 0 if the index
 * has not been mapped.
 */
static unsigned long msm_vpe_get_phy_addr(struct vpe_device *vpe_dev,
	struct msm_vpe_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
	uint8_t native_buff)
{
	struct list_head *head;
	struct msm_vpe_buffer_map_list_t *buff, *tmp;

	head = native_buff ? &buff_queue_info->native_buff_head :
			     &buff_queue_info->vb2_buff_head;

	list_for_each_entry_safe(buff, tmp, head, entry) {
		if (buff->map_info.buff_info.index == buff_index)
			return buff->map_info.phy_addr;
	}
	return 0;
}
/*
 * Map a buffer's ION fd into the VPE IOMMU domain and record it on the
 * queue slot's native or vb2 list.  Returns the mapped physical
 * address, or 0 on any failure.
 *
 * Fix: the original returned -EINVAL (cast to unsigned long) on the
 * duplicate-index and allocation-failure paths.  Callers such as
 * msm_vpe_fetch_buffer_info() treat any non-zero return as a valid
 * physical address, so -EINVAL would be misinterpreted as a mapping.
 * All failure paths now return the 0 sentinel consistently.
 */
static unsigned long msm_vpe_queue_buffer_info(struct vpe_device *vpe_dev,
	struct msm_vpe_buff_queue_info_t *buff_queue,
	struct msm_vpe_buffer_info_t *buffer_info)
{
	struct list_head *buff_head;
	struct msm_vpe_buffer_map_list_t *buff, *save;
	int rc = 0;

	if (buffer_info->native_buff)
		buff_head = &buff_queue->native_buff_head;
	else
		buff_head = &buff_queue->vb2_buff_head;

	/* reject a second mapping of the same buffer index */
	list_for_each_entry_safe(buff, save, buff_head, entry) {
		if (buff->map_info.buff_info.index == buffer_info->index) {
			pr_err("error buffer index already queued\n");
			return 0;
		}
	}

	buff = kzalloc(
		sizeof(struct msm_vpe_buffer_map_list_t), GFP_KERNEL);
	if (!buff) {
		pr_err("error allocating memory\n");
		return 0;
	}

	buff->map_info.buff_info = *buffer_info;
	buff->map_info.ion_handle = ion_import_dma_buf(vpe_dev->client,
		buffer_info->fd);
	if (IS_ERR_OR_NULL(buff->map_info.ion_handle)) {
		pr_err("ION import failed\n");
		goto queue_buff_error1;
	}
	rc = ion_map_iommu(vpe_dev->client, buff->map_info.ion_handle,
		vpe_dev->domain_num, 0, SZ_4K, 0,
		&buff->map_info.phy_addr,
		&buff->map_info.len, 0, 0);
	if (rc < 0) {
		pr_err("ION mmap failed\n");
		goto queue_buff_error2;
	}

	INIT_LIST_HEAD(&buff->entry);
	list_add_tail(&buff->entry, buff_head);

	return buff->map_info.phy_addr;

queue_buff_error2:
	ion_unmap_iommu(vpe_dev->client, buff->map_info.ion_handle,
		vpe_dev->domain_num, 0);
queue_buff_error1:
	ion_free(vpe_dev->client, buff->map_info.ion_handle);
	buff->map_info.ion_handle = NULL;
	kzfree(buff);

	return 0;
}
/*
 * Undo msm_vpe_queue_buffer_info(): unmap from the IOMMU, drop the ION
 * handle, unlink the entry and free it.
 */
static void msm_vpe_dequeue_buffer_info(struct vpe_device *vpe_dev,
	struct msm_vpe_buffer_map_list_t *buff)
{
	ion_unmap_iommu(vpe_dev->client, buff->map_info.ion_handle,
		vpe_dev->domain_num, 0);
	ion_free(vpe_dev->client, buff->map_info.ion_handle);
	buff->map_info.ion_handle = NULL;

	list_del_init(&buff->entry);
	kzfree(buff);
}
/*
 * Resolve a buffer to its mapped physical address.  Native buffers that
 * have not been mapped yet are mapped on demand.  Returns 0 on failure.
 */
static unsigned long msm_vpe_fetch_buffer_info(struct vpe_device *vpe_dev,
	struct msm_vpe_buffer_info_t *buffer_info, uint32_t session_id,
	uint32_t stream_id)
{
	struct msm_vpe_buff_queue_info_t *queue_info;
	unsigned long phy_addr;

	queue_info = msm_vpe_get_buff_queue_entry(vpe_dev, session_id,
		stream_id);
	if (!queue_info) {
		pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
			session_id, stream_id);
		return 0;
	}

	phy_addr = msm_vpe_get_phy_addr(vpe_dev, queue_info,
		buffer_info->index, buffer_info->native_buff);
	if ((phy_addr == 0) && buffer_info->native_buff)
		phy_addr = msm_vpe_queue_buffer_info(vpe_dev, queue_info,
			buffer_info);
	return phy_addr;
}
/*
 * Map every buffer in a stream's buffer list onto its queue slot.
 * The identity field packs session id (high 16 bits) and stream id
 * (low 16 bits).  Per-buffer mapping failures are not propagated -
 * msm_vpe_queue_buffer_info()'s return is deliberately ignored here.
 */
static int32_t msm_vpe_enqueue_buff_info_list(struct vpe_device *vpe_dev,
	struct msm_vpe_stream_buff_info_t *stream_buff_info)
{
	uint32_t j;
	struct msm_vpe_buff_queue_info_t *buff_queue_info;

	buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
			(stream_buff_info->identity >> 16) & 0xFFFF,
			stream_buff_info->identity & 0xFFFF);
	if (buff_queue_info == NULL) {
		pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
			(stream_buff_info->identity >> 16) & 0xFFFF,
			stream_buff_info->identity & 0xFFFF);
		return -EINVAL;
	}

	for (j = 0; j < stream_buff_info->num_buffs; j++) {
		msm_vpe_queue_buffer_info(vpe_dev, buff_queue_info,
			&stream_buff_info->buffer_info[j]);
	}
	return 0;
}
static int32_t msm_vpe_dequeue_buff_info_list(struct vpe_device *vpe_dev,
struct msm_vpe_buff_queue_info_t *buff_queue_info)
{
struct msm_vpe_buffer_map_list_t *buff, *save;
struct list_head *buff_head;
buff_head = &buff_queue_info->native_buff_head;
list_for_each_entry_safe(buff, save, buff_head, entry) {
msm_vpe_dequeue_buffer_info(vpe_dev, buff);
}
buff_head = &buff_queue_info->vb2_buff_head;
list_for_each_entry_safe(buff, save, buff_head, entry) {
msm_vpe_dequeue_buffer_info(vpe_dev, buff);
}
return 0;
}
/*
 * Claim the first free buffer queue slot for a (session, stream) pair.
 * Returns 0 on success, -EINVAL when all slots are taken.
 */
static int32_t msm_vpe_add_buff_queue_entry(struct vpe_device *vpe_dev,
	uint16_t session_id, uint16_t stream_id)
{
	uint32_t idx;

	for (idx = 0; idx < vpe_dev->num_buffq; idx++) {
		struct msm_vpe_buff_queue_info_t *slot =
			&vpe_dev->buff_queue[idx];

		if (slot->used != 0)
			continue;

		slot->used = 1;
		slot->session_id = session_id;
		slot->stream_id = stream_id;
		INIT_LIST_HEAD(&slot->vb2_buff_head);
		INIT_LIST_HEAD(&slot->native_buff_head);
		return 0;
	}

	pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
		session_id, stream_id);
	return -EINVAL;
}
/*
 * Release the buffer queue slot of a (session, stream) pair and reset
 * its list heads.  Returns -EINVAL when no such slot exists.
 */
static int32_t msm_vpe_free_buff_queue_entry(struct vpe_device *vpe_dev,
	uint32_t session_id, uint32_t stream_id)
{
	struct msm_vpe_buff_queue_info_t *slot;

	slot = msm_vpe_get_buff_queue_entry(vpe_dev, session_id, stream_id);
	if (!slot) {
		pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
			session_id, stream_id);
		return -EINVAL;
	}

	slot->used = 0;
	slot->session_id = 0;
	slot->stream_id = 0;
	INIT_LIST_HEAD(&slot->vb2_buff_head);
	INIT_LIST_HEAD(&slot->native_buff_head);
	return 0;
}
/*
 * Allocate the array of buffer queue slots.
 *
 * Improvements over the original:
 *  - double-initialization is rejected before allocating, so no
 *    alloc-then-free on the error path;
 *  - kcalloc() replaces kzalloc(n * size), guarding the multiplication
 *    against overflow.
 *
 * Returns 0, -EINVAL if a queue already exists, or -ENOMEM.
 */
static int32_t msm_vpe_create_buff_queue(struct vpe_device *vpe_dev,
	uint32_t num_buffq)
{
	struct msm_vpe_buff_queue_info_t *buff_queue;

	if (vpe_dev->buff_queue) {
		pr_err("Buff queue not empty\n");
		return -EINVAL;
	}

	buff_queue = kcalloc(num_buffq,
		sizeof(struct msm_vpe_buff_queue_info_t),
		GFP_KERNEL);
	if (!buff_queue) {
		pr_err("Buff queue allocation failure\n");
		return -ENOMEM;
	}

	vpe_dev->buff_queue = buff_queue;
	vpe_dev->num_buffq = num_buffq;
	return 0;
}
/*
 * Tear down the buffer queue array, force-releasing (and logging) any
 * slots still in use.
 */
static void msm_vpe_delete_buff_queue(struct vpe_device *vpe_dev)
{
	uint32_t idx;

	for (idx = 0; idx < vpe_dev->num_buffq; idx++) {
		if (vpe_dev->buff_queue[idx].used != 1)
			continue;
		pr_err("Queue not free sessionid: %d, streamid: %d\n",
			vpe_dev->buff_queue[idx].session_id,
			vpe_dev->buff_queue[idx].stream_id);
		msm_vpe_free_buff_queue_entry(vpe_dev,
			vpe_dev->buff_queue[idx].session_id,
			vpe_dev->buff_queue[idx].stream_id);
	}
	kzfree(vpe_dev->buff_queue);
	vpe_dev->buff_queue = NULL;
	vpe_dev->num_buffq = 0;
}
/*
 * kref release callback: destroy the ION client once the last
 * reference taken in vpe_init_mem() is dropped.
 */
void vpe_release_ion_client(struct kref *ref)
{
	struct vpe_device *vpe_dev = container_of(ref,
		struct vpe_device, refcount);
	ion_client_destroy(vpe_dev->client);
}
/*
 * Create the device's ION client and take the initial reference
 * released by vpe_deinit_mem().
 * NOTE(review): only a NULL check is done here; if
 * msm_ion_client_create() can return an ERR_PTR on this kernel,
 * this check would miss it - confirm against the ION API in use.
 */
static int vpe_init_mem(struct vpe_device *vpe_dev)
{
	kref_init(&vpe_dev->refcount);
	kref_get(&vpe_dev->refcount);
	vpe_dev->client = msm_ion_client_create(-1, "vpe");

	if (!vpe_dev->client) {
		pr_err("couldn't create ion client\n");
		return -ENODEV;
	}

	return 0;
}
/* Drop the reference from vpe_init_mem(); destroys the ION client at zero. */
static void vpe_deinit_mem(struct vpe_device *vpe_dev)
{
	kref_put(&vpe_dev->refcount, vpe_release_ion_client);
}
/*
 * VPE interrupt handler: snapshot the IRQ status into the next tasklet
 * queue slot (overwriting the oldest entry on overflow), schedule the
 * tasklet, then acknowledge and mask the interrupt at the hardware.
 * The queue is protected by tasklet_lock; irq_cnt tracks pending slots.
 */
static irqreturn_t msm_vpe_irq(int irq_num, void *data)
{
	unsigned long flags;
	uint32_t irq_status;
	struct msm_vpe_tasklet_queue_cmd *queue_cmd;
	struct vpe_device *vpe_dev = (struct vpe_device *) data;

	irq_status = msm_camera_io_r_mb(vpe_dev->base +
					VPE_INTR_STATUS_OFFSET);

	spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
	queue_cmd = &vpe_dev->tasklet_queue_cmd[vpe_dev->taskletq_idx];
	if (queue_cmd->cmd_used) {
		/* ring overflow: drop the oldest entry and reuse its slot */
		VPE_DBG("%s: vpe tasklet queue overflow\n", __func__);
		list_del(&queue_cmd->list);
	} else {
		atomic_add(1, &vpe_dev->irq_cnt);
	}
	queue_cmd->irq_status = irq_status;
	queue_cmd->cmd_used = 1;
	vpe_dev->taskletq_idx =
		(vpe_dev->taskletq_idx + 1) % MSM_VPE_TASKLETQ_SIZE;
	list_add_tail(&queue_cmd->list, &vpe_dev->tasklet_q);
	spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);

	tasklet_schedule(&vpe_dev->vpe_tasklet);

	/* ack and mask further interrupts until the tasklet runs */
	msm_camera_io_w_mb(irq_status, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
	msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);

	VPE_DBG("%s: irq_status=0x%x.\n", __func__, irq_status);

	return IRQ_HANDLED;
}
/*
 * msm_vpe_do_tasklet() - bottom half for the VPE interrupt.
 * @data: the vpe device, cast to unsigned long by tasklet_init()
 *
 * Drains every queued interrupt descriptor and signals frame completion
 * for each one via msm_vpe_notify_frame_done().
 */
static void msm_vpe_do_tasklet(unsigned long data)
{
	unsigned long flags;
	struct vpe_device *vpe_dev = (struct vpe_device *)data;
	struct msm_vpe_tasklet_queue_cmd *queue_cmd;

	while (atomic_read(&vpe_dev->irq_cnt)) {
		spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
		/*
		 * list_first_entry() never returns NULL - on an empty list
		 * it yields a bogus pointer derived from the list head, so
		 * the original "if (!queue_cmd)" test could never fire.
		 * Check for an empty list explicitly instead.
		 */
		if (list_empty(&vpe_dev->tasklet_q)) {
			atomic_set(&vpe_dev->irq_cnt, 0);
			spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
			return;
		}
		queue_cmd = list_first_entry(&vpe_dev->tasklet_q,
			struct msm_vpe_tasklet_queue_cmd, list);
		atomic_sub(1, &vpe_dev->irq_cnt);
		list_del(&queue_cmd->list);
		queue_cmd->cmd_used = 0;
		spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
		VPE_DBG("Frame done!!\n");
		msm_vpe_notify_frame_done(vpe_dev);
	}
}
/*
 * vpe_init_hardware() - power up the VPE: regulator, clocks, register
 * mapping, irq (skipped while state == VPE_STATE_BOOT) and buffer queue.
 *
 * The regulator is acquired only once (fs_vpe kept across release).
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released in reverse order.
 *
 * Fixes over the original:
 *  - error path now sets fs_vpe = NULL after regulator_put(), so a retry
 *    cannot dereference a stale regulator handle;
 *  - clocks are disabled when ioremap/request_irq fail (they were left
 *    running before);
 *  - msm_vpe_create_buff_queue() failure is no longer ignored.
 */
static int vpe_init_hardware(struct vpe_device *vpe_dev)
{
	int rc = 0;

	if (vpe_dev->fs_vpe == NULL) {
		vpe_dev->fs_vpe =
			regulator_get(&vpe_dev->pdev->dev, "vdd");
		if (IS_ERR(vpe_dev->fs_vpe)) {
			pr_err("Regulator vpe vdd get failed %ld\n",
				PTR_ERR(vpe_dev->fs_vpe));
			vpe_dev->fs_vpe = NULL;
			rc = -ENODEV;
			goto fail;
		} else if (regulator_enable(vpe_dev->fs_vpe)) {
			pr_err("Regulator vpe vdd enable failed\n");
			regulator_put(vpe_dev->fs_vpe);
			vpe_dev->fs_vpe = NULL;
			rc = -ENODEV;
			goto fail;
		}
	}
	rc = msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
		vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
	if (rc < 0) {
		rc = -ENODEV;
		pr_err("clk enable failed\n");
		goto disable_and_put_regulator;
	}
	vpe_dev->base = ioremap(vpe_dev->mem->start,
		resource_size(vpe_dev->mem));
	if (!vpe_dev->base) {
		rc = -ENOMEM;
		pr_err("ioremap failed\n");
		goto disable_clocks;
	}
	if (vpe_dev->state != VPE_STATE_BOOT) {
		rc = request_irq(vpe_dev->irq->start, msm_vpe_irq,
			IRQF_TRIGGER_RISING,
			"vpe", vpe_dev);
		if (rc < 0) {
			pr_err("irq request fail! start=%u\n",
				vpe_dev->irq->start);
			rc = -EBUSY;
			goto unmap_base;
		} else {
			VPE_DBG("Got irq! %d\n", vpe_dev->irq->start);
		}
	} else {
		VPE_DBG("Skip requesting the irq since device is booting\n");
	}
	vpe_dev->buf_mgr_subdev = msm_buf_mngr_get_subdev();
	rc = msm_vpe_create_buff_queue(vpe_dev, MSM_VPE_MAX_BUFF_QUEUE);
	if (rc < 0)
		goto free_irq_res;
	return rc;

free_irq_res:
	if (vpe_dev->state != VPE_STATE_BOOT)
		free_irq(vpe_dev->irq->start, vpe_dev);
unmap_base:
	iounmap(vpe_dev->base);
	vpe_dev->base = NULL;
disable_clocks:
	msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
		vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
disable_and_put_regulator:
	regulator_disable(vpe_dev->fs_vpe);
	regulator_put(vpe_dev->fs_vpe);
	vpe_dev->fs_vpe = NULL;
fail:
	return rc;
}
/*
 * vpe_release_hardware() - counterpart of vpe_init_hardware().
 *
 * Releases the irq and kills the tasklet (unless the device is still in
 * its one-time BOOT probe sequence, where no irq was requested), frees
 * the buffer queue, unmaps the registers and gates the clocks.  The
 * regulator is deliberately not released here; vpe_init_hardware()
 * reuses it on the next power-up.
 */
static int vpe_release_hardware(struct vpe_device *vpe_dev)
{
	if (vpe_dev->state != VPE_STATE_BOOT) {
		free_irq(vpe_dev->irq->start, vpe_dev);
		tasklet_kill(&vpe_dev->vpe_tasklet);
		atomic_set(&vpe_dev->irq_cnt, 0);
	}
	msm_vpe_delete_buff_queue(vpe_dev);
	iounmap(vpe_dev->base);
	msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
		vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
	return 0;
}
/*
 * vpe_open_node() - subdev open handler.
 *
 * Claims a free subscription slot for the new file handle and, on the
 * first open, powers up the hardware and the ion client.  On failure the
 * slot claim and open count are rolled back under the same mutex hold.
 *
 * Returns 0 on success, -ENODEV when no free slot exists or
 * hardware/memory init fails.
 */
static int vpe_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	int rc = 0;
	uint32_t i;
	struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
	mutex_lock(&vpe_dev->mutex);
	if (vpe_dev->vpe_open_cnt == MAX_ACTIVE_VPE_INSTANCE) {
		pr_err("No free VPE instance\n");
		rc = -ENODEV;
		goto err_mutex_unlock;
	}
	/* Grab the first inactive subscription slot for this handle. */
	for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
		if (vpe_dev->vpe_subscribe_list[i].active == 0) {
			vpe_dev->vpe_subscribe_list[i].active = 1;
			vpe_dev->vpe_subscribe_list[i].vfh = &fh->vfh;
			break;
		}
	}
	if (i == MAX_ACTIVE_VPE_INSTANCE) {
		pr_err("No free instance\n");
		rc = -ENODEV;
		goto err_mutex_unlock;
	}
	VPE_DBG("open %d %p\n", i, &fh->vfh);
	vpe_dev->vpe_open_cnt++;
	/* First opener brings the hardware and ion client up. */
	if (vpe_dev->vpe_open_cnt == 1) {
		rc = vpe_init_hardware(vpe_dev);
		if (rc < 0) {
			pr_err("%s: Couldn't init vpe hardware\n", __func__);
			vpe_dev->vpe_open_cnt--;
			rc = -ENODEV;
			goto err_fixup_sub_list;
		}
		rc = vpe_init_mem(vpe_dev);
		if (rc < 0) {
			pr_err("%s: Couldn't init mem\n", __func__);
			vpe_dev->vpe_open_cnt--;
			rc = -ENODEV;
			goto err_release_hardware;
		}
		vpe_dev->state = VPE_STATE_IDLE;
	}
	mutex_unlock(&vpe_dev->mutex);
	return rc;
err_release_hardware:
	vpe_release_hardware(vpe_dev);
err_fixup_sub_list:
	/* Hand the subscription slot claimed above back. */
	for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
		if (vpe_dev->vpe_subscribe_list[i].vfh == &fh->vfh) {
			vpe_dev->vpe_subscribe_list[i].active = 0;
			vpe_dev->vpe_subscribe_list[i].vfh = NULL;
			break;
		}
	}
err_mutex_unlock:
	mutex_unlock(&vpe_dev->mutex);
	return rc;
}
/*
 * vpe_close_node() - subdev close handler.
 *
 * Releases the subscription slot owned by this file handle; the last
 * closer powers the hardware and ion client back down.
 *
 * Returns 0 on success, -ENODEV if the handle owns no slot.
 */
static int vpe_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	uint32_t slot;
	struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);

	mutex_lock(&vpe_dev->mutex);

	/* Locate and clear the subscription slot owned by this handle. */
	for (slot = 0; slot < MAX_ACTIVE_VPE_INSTANCE; slot++) {
		if (vpe_dev->vpe_subscribe_list[slot].vfh != &fh->vfh)
			continue;
		vpe_dev->vpe_subscribe_list[slot].active = 0;
		vpe_dev->vpe_subscribe_list[slot].vfh = NULL;
		break;
	}
	if (slot == MAX_ACTIVE_VPE_INSTANCE) {
		pr_err("Invalid close\n");
		mutex_unlock(&vpe_dev->mutex);
		return -ENODEV;
	}

	VPE_DBG("close %d %p\n", slot, &fh->vfh);
	vpe_dev->vpe_open_cnt--;
	/* Last user gone: quiesce memory client and hardware. */
	if (vpe_dev->vpe_open_cnt == 0) {
		vpe_deinit_mem(vpe_dev);
		vpe_release_hardware(vpe_dev);
		vpe_dev->state = VPE_STATE_OFF;
	}
	mutex_unlock(&vpe_dev->mutex);
	return 0;
}
/* Per-file-handle open/close hooks invoked by the v4l2 subdev core. */
static const struct v4l2_subdev_internal_ops msm_vpe_internal_ops = {
	.open = vpe_open_node,
	.close = vpe_close_node,
};
/*
 * msm_vpe_buffer_ops() - forward a buffer-manager command to the buf_mgr
 * subdev.
 * @vpe_dev:       vpe device
 * @buff_mgr_ops:  buffer manager ioctl command (e.g. GET_BUF/PUT_BUF)
 * @buff_mgr_info: command payload, filled in/consumed by the subdev
 *
 * Returns the subdev call result (negative errno on failure).
 */
static int msm_vpe_buffer_ops(struct vpe_device *vpe_dev,
	uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info)
{
	/* The original dead-initialized rc to -EINVAL and immediately
	 * overwrote it; initialize from the call directly instead. */
	int rc = v4l2_subdev_call(vpe_dev->buf_mgr_subdev, core, ioctl,
		buff_mgr_ops, buff_mgr_info);

	if (rc < 0)
		pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
	return rc;
}
/*
 * msm_vpe_notify_frame_done() - complete the frame at the head of the
 * processing queue.
 *
 * Moves the processed frame descriptor to the event-data queue (where
 * VIDIOC_MSM_VPE_GET_EVENTPAYLOAD consumes and frees it), returns the
 * output buffer to the buffer manager unless it was diverted, and posts
 * a V4L2_EVENT_VPE_FRAME_DONE event to userspace.
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev)
{
	struct v4l2_event v4l2_evt;
	struct msm_queue_cmd *frame_qcmd;
	struct msm_queue_cmd *event_qcmd;
	struct msm_vpe_frame_info_t *processed_frame;
	struct msm_device_queue *queue = &vpe_dev->processing_q;
	struct msm_buf_mngr_info buff_mgr_info;
	int rc = 0;

	if (queue->len <= 0)
		return rc;

	frame_qcmd = msm_dequeue(queue, list_frame);
	if (!frame_qcmd)
		return -EFAULT;

	processed_frame = frame_qcmd->command;
	do_gettimeofday(&(processed_frame->out_time));
	kfree(frame_qcmd);

	event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
	if (!event_qcmd) {
		pr_err("%s: Insufficient memory\n", __func__);
		/* The frame descriptor was dequeued above and would
		 * otherwise be orphaned (the original leaked it here). */
		kfree(processed_frame);
		return -ENOMEM;
	}
	atomic_set(&event_qcmd->on_heap, 1);
	event_qcmd->command = processed_frame;
	VPE_DBG("fid %d\n", processed_frame->frame_id);
	msm_enqueue(&vpe_dev->eventData_q, &event_qcmd->list_eventdata);

	if (!processed_frame->output_buffer_info.processed_divert) {
		memset(&buff_mgr_info, 0, sizeof(buff_mgr_info));
		/* identity packs session (high 16) and stream (low 16). */
		buff_mgr_info.session_id =
			((processed_frame->identity >> 16) & 0xFFFF);
		buff_mgr_info.stream_id =
			(processed_frame->identity & 0xFFFF);
		buff_mgr_info.frame_id = processed_frame->frame_id;
		buff_mgr_info.timestamp = processed_frame->timestamp;
		buff_mgr_info.index =
			processed_frame->output_buffer_info.index;
		rc = msm_vpe_buffer_ops(vpe_dev,
			VIDIOC_MSM_BUF_MNGR_BUF_DONE,
			&buff_mgr_info);
		if (rc < 0) {
			pr_err("%s: error doing VIDIOC_MSM_BUF_MNGR_BUF_DONE\n",
				__func__);
			rc = -EINVAL;
		}
	}

	v4l2_evt.id = processed_frame->inst_id;
	v4l2_evt.type = V4L2_EVENT_VPE_FRAME_DONE;
	v4l2_event_queue(vpe_dev->msm_sd.sd.devnode, &v4l2_evt);
	return rc;
}
/*
 * vpe_update_scaler_params() - compute and program the scaler registers
 * for one strip.
 *
 * Chooses FIR vs M/N scaling per axis from the src/dst ROI sizes,
 * derives the u3.29 fixed-point phase step/init values, selects the FIR
 * coefficient bank, and writes everything to the hardware.
 *
 * NOTE(review): strip_info is passed by value, so the phase_* fields
 * assigned near the end only update this local copy (and the hardware
 * registers); the caller's struct is untouched - confirm intended.
 */
static void vpe_update_scaler_params(struct vpe_device *vpe_dev,
	struct msm_vpe_frame_strip_info strip_info)
{
	uint32_t out_ROI_width, out_ROI_height;
	uint32_t src_ROI_width, src_ROI_height;
	/*
	 * phase_step_x, phase_step_y, phase_init_x and phase_init_y
	 * are represented in fixed-point, unsigned 3.29 format
	 */
	uint32_t phase_step_x = 0;
	uint32_t phase_step_y = 0;
	uint32_t phase_init_x = 0;
	uint32_t phase_init_y = 0;
	uint32_t src_roi, src_x, src_y, src_xy, temp;
	uint32_t yscale_filter_sel, xscale_filter_sel;
	uint32_t scale_unit_sel_x, scale_unit_sel_y;
	uint64_t numerator, denominator;
	/*
	 * assumption is both direction need zoom. this can be
	 * improved.
	 */
	temp = msm_camera_io_r(vpe_dev->base + VPE_OP_MODE_OFFSET) | 0x3;
	msm_camera_io_w(temp, vpe_dev->base + VPE_OP_MODE_OFFSET);
	src_ROI_width = strip_info.src_w;
	src_ROI_height = strip_info.src_h;
	out_ROI_width = strip_info.dst_w;
	out_ROI_height = strip_info.dst_h;
	VPE_DBG("src w = %u, h=%u, dst w = %u, h =%u.\n",
		src_ROI_width, src_ROI_height, out_ROI_width,
		out_ROI_height);
	/* Pack height into the upper and width into the lower 16 bits. */
	src_roi = (src_ROI_height << 16) + src_ROI_width;
	msm_camera_io_w(src_roi, vpe_dev->base + VPE_SRC_SIZE_OFFSET);
	src_x = strip_info.src_x;
	src_y = strip_info.src_y;
	VPE_DBG("src_x = %d, src_y=%d.\n", src_x, src_y);
	src_xy = src_y*(1<<16) + src_x;
	msm_camera_io_w(src_xy, vpe_dev->base +
		VPE_SRC_XY_OFFSET);
	VPE_DBG("src_xy = 0x%x, src_roi=0x%x.\n", src_xy, src_roi);
	/* decide whether to use FIR or M/N for scaling */
	if ((out_ROI_width == 1 && src_ROI_width < 4) ||
		(src_ROI_width < 4 * out_ROI_width - 3))
		scale_unit_sel_x = 0;/* use FIR scalar */
	else
		scale_unit_sel_x = 1;/* use M/N scalar */
	if ((out_ROI_height == 1 && src_ROI_height < 4) ||
		(src_ROI_height < 4 * out_ROI_height - 3))
		scale_unit_sel_y = 0;/* use FIR scalar */
	else
		scale_unit_sel_y = 1;/* use M/N scalar */
	/* calculate phase step for the x direction */
	/*
	 * if destination is only 1 pixel wide, the value of
	 * phase_step_x is unimportant. Assigning phase_step_x to src
	 * ROI width as an arbitrary value.
	 */
	if (out_ROI_width == 1)
		phase_step_x = (uint32_t) ((src_ROI_width) <<
			SCALER_PHASE_BITS);
	/* if using FIR scalar */
	else if (scale_unit_sel_x == 0) {
		/*
		 * Calculate the quotient ( src_ROI_width - 1 ) (
		 * out_ROI_width - 1) with u3.29 precision. Quotient
		 * is rounded up to the larger 29th decimal point
		 */
		numerator = (uint64_t)(src_ROI_width - 1) <<
			SCALER_PHASE_BITS;
		/*
		 * never equals to 0 because of the "(out_ROI_width ==
		 * 1 )"
		 */
		denominator = (uint64_t)(out_ROI_width - 1);
		/*
		 * divide and round up to the larger 29th decimal
		 * point.
		 */
		phase_step_x = (uint32_t) vpe_do_div((numerator +
			denominator - 1), denominator);
	} else if (scale_unit_sel_x == 1) { /* if M/N scalar */
		/*
		 * Calculate the quotient ( src_ROI_width ) / (
		 * out_ROI_width) with u3.29 precision. Quotient is
		 * rounded down to the smaller 29th decimal point.
		 */
		numerator = (uint64_t)(src_ROI_width) <<
			SCALER_PHASE_BITS;
		denominator = (uint64_t)(out_ROI_width);
		phase_step_x =
			(uint32_t) vpe_do_div(numerator, denominator);
	}
	/* calculate phase step for the y direction */
	/*
	 * if destination is only 1 pixel wide, the value of
	 * phase_step_x is unimportant. Assigning phase_step_x to src
	 * ROI width as an arbitrary value.
	 */
	if (out_ROI_height == 1)
		phase_step_y =
			(uint32_t) ((src_ROI_height) << SCALER_PHASE_BITS);
	/* if FIR scalar */
	else if (scale_unit_sel_y == 0) {
		/*
		 * Calculate the quotient ( src_ROI_height - 1 ) / (
		 * out_ROI_height - 1) with u3.29 precision. Quotient
		 * is rounded up to the larger 29th decimal point.
		 */
		numerator = (uint64_t)(src_ROI_height - 1) <<
			SCALER_PHASE_BITS;
		/*
		 * never equals to 0 because of the " ( out_ROI_height
		 * == 1 )" case
		 */
		denominator = (uint64_t)(out_ROI_height - 1);
		/*
		 * Quotient is rounded up to the larger 29th decimal
		 * point.
		 */
		phase_step_y =
			(uint32_t) vpe_do_div(
				(numerator + denominator - 1), denominator);
	} else if (scale_unit_sel_y == 1) { /* if M/N scalar */
		/*
		 * Calculate the quotient ( src_ROI_height ) (
		 * out_ROI_height) with u3.29 precision. Quotient is
		 * rounded down to the smaller 29th decimal point.
		 */
		numerator = (uint64_t)(src_ROI_height) <<
			SCALER_PHASE_BITS;
		denominator = (uint64_t)(out_ROI_height);
		phase_step_y = (uint32_t) vpe_do_div(
			numerator, denominator);
	}
	/* decide which set of FIR coefficients to use */
	if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
		xscale_filter_sel = 0;
	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
		xscale_filter_sel = 1;
	else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
		xscale_filter_sel = 2;
	else
		xscale_filter_sel = 3;
	if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
		yscale_filter_sel = 0;
	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
		yscale_filter_sel = 1;
	else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
		yscale_filter_sel = 2;
	else
		yscale_filter_sel = 3;
	/* calculate phase init for the x direction */
	/* if using FIR scalar */
	if (scale_unit_sel_x == 0) {
		if (out_ROI_width == 1)
			phase_init_x =
				(uint32_t) ((src_ROI_width - 1) <<
					SCALER_PHASE_BITS);
		else
			phase_init_x = 0;
	} else if (scale_unit_sel_x == 1) /* M over N scalar */
		phase_init_x = 0;
	/*
	 * calculate phase init for the y direction if using FIR
	 * scalar
	 */
	if (scale_unit_sel_y == 0) {
		if (out_ROI_height == 1)
			phase_init_y =
				(uint32_t) ((src_ROI_height -
					1) << SCALER_PHASE_BITS);
		else
			phase_init_y = 0;
	} else if (scale_unit_sel_y == 1) /* M over N scalar */
		phase_init_y = 0;
	/* Latch the computed values into the local copy, then program hw. */
	strip_info.phase_step_x = phase_step_x;
	strip_info.phase_step_y = phase_step_y;
	strip_info.phase_init_x = phase_init_x;
	strip_info.phase_init_y = phase_init_y;
	VPE_DBG("phase step x = %d, step y = %d.\n",
		strip_info.phase_step_x, strip_info.phase_step_y);
	VPE_DBG("phase init x = %d, init y = %d.\n",
		strip_info.phase_init_x, strip_info.phase_init_y);
	msm_camera_io_w(strip_info.phase_step_x, vpe_dev->base +
		VPE_SCALE_PHASEX_STEP_OFFSET);
	msm_camera_io_w(strip_info.phase_step_y, vpe_dev->base +
		VPE_SCALE_PHASEY_STEP_OFFSET);
	msm_camera_io_w(strip_info.phase_init_x, vpe_dev->base +
		VPE_SCALE_PHASEX_INIT_OFFSET);
	msm_camera_io_w(strip_info.phase_init_y, vpe_dev->base +
		VPE_SCALE_PHASEY_INIT_OFFSET);
}
/* Latch the four plane base addresses (source P0/P1, destination P0/P1)
 * into the VPE address registers. */
static void vpe_program_buffer_addresses(
	struct vpe_device *vpe_dev,
	unsigned long srcP0,
	unsigned long srcP1,
	unsigned long outP0,
	unsigned long outP1)
{
	const struct {
		unsigned long val;
		unsigned long off;
	} regs[] = {
		{ srcP0, VPE_SRCP0_ADDR_OFFSET },
		{ srcP1, VPE_SRCP1_ADDR_OFFSET },
		{ outP0, VPE_OUTP0_ADDR_OFFSET },
		{ outP1, VPE_OUTP1_ADDR_OFFSET },
	};
	unsigned int i;

	VPE_DBG("%s VPE Configured with:\n"
		"Src %x, %x Dest %x, %x",
		__func__, (uint32_t)srcP0, (uint32_t)srcP1,
		(uint32_t)outP0, (uint32_t)outP1);

	for (i = 0; i < ARRAY_SIZE(regs); i++)
		msm_camera_io_w(regs[i].val, vpe_dev->base + regs[i].off);
}
/*
 * vpe_start() - kick off processing of the currently-programmed frame.
 *
 * Unmasks the display-list-0 done interrupt, dumps the programmed
 * register ranges for debugging, then writes the DL0 start bit.
 */
static int vpe_start(struct vpe_device *vpe_dev)
{
	/* enable the frame irq, bit 0 = Display list 0 ROI done */
	msm_camera_io_w_mb(1, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
	msm_camera_io_dump(vpe_dev->base, 0x120);
	msm_camera_io_dump(vpe_dev->base + 0x00400, 0x18);
	msm_camera_io_dump(vpe_dev->base + 0x10000, 0x250);
	msm_camera_io_dump(vpe_dev->base + 0x30000, 0x20);
	msm_camera_io_dump(vpe_dev->base + 0x50000, 0x30);
	msm_camera_io_dump(vpe_dev->base + 0x50400, 0x10);
	/*
	 * This triggers the operation. When the VPE is done,
	 * msm_vpe_irq will fire.
	 */
	msm_camera_io_w_mb(1, vpe_dev->base + VPE_DL0_START_OFFSET);
	return 0;
}
/* Program the default value into the VPE AXI arbitration register. */
static void vpe_config_axi_default(struct vpe_device *vpe_dev)
{
	msm_camera_io_w(0x25, vpe_dev->base + VPE_AXI_ARB_2_OFFSET);
}
/*
 * vpe_reset() - soft-reset the VPE core and restore default register
 * values.
 *
 * Returns 0 (the value of the polled reset bit once it clears).
 *
 * NOTE(review): the reset-bit poll below has no timeout; if the core
 * never clears the bit this loop spins forever - confirm whether a
 * bounded wait is required.
 */
static int vpe_reset(struct vpe_device *vpe_dev)
{
	uint32_t vpe_version;
	uint32_t rc = 0;
	vpe_version = msm_camera_io_r(
		vpe_dev->base + VPE_HW_VERSION_OFFSET);
	VPE_DBG("vpe_version = 0x%x\n", vpe_version);
	/* disable all interrupts.*/
	msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
	/* clear all pending interrupts*/
	msm_camera_io_w(0x1fffff, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
	/* write sw_reset to reset the core. */
	msm_camera_io_w(0x10, vpe_dev->base + VPE_SW_RESET_OFFSET);
	/* then poll the reset bit, it should be self-cleared. */
	while (1) {
		rc = msm_camera_io_r(vpe_dev->base + VPE_SW_RESET_OFFSET) \
			& 0x10;
		if (rc == 0)
			break;
		cpu_relax();
	}
	/*
	 * at this point, hardware is reset. Then pogram to default
	 * values.
	 */
	msm_camera_io_w(VPE_AXI_RD_ARB_CONFIG_VALUE,
		vpe_dev->base + VPE_AXI_RD_ARB_CONFIG_OFFSET);
	msm_camera_io_w(VPE_CGC_ENABLE_VALUE,
		vpe_dev->base + VPE_CGC_EN_OFFSET);
	msm_camera_io_w(1, vpe_dev->base + VPE_CMD_MODE_OFFSET);
	msm_camera_io_w(VPE_DEFAULT_OP_MODE_VALUE,
		vpe_dev->base + VPE_OP_MODE_OFFSET);
	msm_camera_io_w(VPE_DEFAULT_SCALE_CONFIG,
		vpe_dev->base + VPE_SCALE_CONFIG_OFFSET);
	vpe_config_axi_default(vpe_dev);
	return rc;
}
/* Load one scaler coefficient table.  The first word at *p holds the
 * starting table index; it is followed by LSB/MSB word pairs, one pair
 * per coefficient entry. */
static void vpe_update_scale_coef(struct vpe_device *vpe_dev, uint32_t *p)
{
	uint32_t idx;
	uint32_t offset = *p;

	for (idx = offset; idx < (VPE_SCALE_COEFF_NUM + offset); idx++) {
		VPE_DBG("Setting scale table %d\n", idx);
		p++;
		msm_camera_io_w(*p, vpe_dev->base + VPE_SCALE_COEFF_LSBn(idx));
		p++;
		msm_camera_io_w(*p, vpe_dev->base + VPE_SCALE_COEFF_MSBn(idx));
	}
}
/* Program the six source-plane registers from the packed config words:
 * format, unpack pattern, image size, Y stride, ROI size, ROI x/y. */
static void vpe_input_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
{
	msm_camera_io_w(p[0], vpe_dev->base + VPE_SRC_FORMAT_OFFSET);
	msm_camera_io_w(p[1], vpe_dev->base + VPE_SRC_UNPACK_PATTERN1_OFFSET);
	msm_camera_io_w(p[2], vpe_dev->base + VPE_SRC_IMAGE_SIZE_OFFSET);
	msm_camera_io_w(p[3], vpe_dev->base + VPE_SRC_YSTRIDE1_OFFSET);
	msm_camera_io_w(p[4], vpe_dev->base + VPE_SRC_SIZE_OFFSET);
	msm_camera_io_w(p[5], vpe_dev->base + VPE_SRC_XY_OFFSET);
}
/* Program the five output-plane registers from the packed config words:
 * format, pack pattern, Y stride, size, x/y. */
static void vpe_output_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
{
	msm_camera_io_w(p[0], vpe_dev->base + VPE_OUT_FORMAT_OFFSET);
	msm_camera_io_w(p[1], vpe_dev->base + VPE_OUT_PACK_PATTERN1_OFFSET);
	msm_camera_io_w(p[2], vpe_dev->base + VPE_OUT_YSTRIDE1_OFFSET);
	msm_camera_io_w(p[3], vpe_dev->base + VPE_OUT_SIZE_OFFSET);
	msm_camera_io_w(p[4], vpe_dev->base + VPE_OUT_XY_OFFSET);
}
/* Write the single operation-mode configuration word to the hardware. */
static void vpe_operation_config(struct vpe_device *vpe_dev, uint32_t *p)
{
	msm_camera_io_w(*p, vpe_dev->base + VPE_OP_MODE_OFFSET);
}
/**
* msm_vpe_transaction_setup() - send setup for one frame to VPE
* @vpe_dev: vpe device
* @data: packed setup commands
*
* See msm_vpe.h for the expected format of `data'
*/
static void msm_vpe_transaction_setup(struct vpe_device *vpe_dev, void *data)
{
int i;
void *iter = data;
vpe_mem_dump("vpe_transaction", data, VPE_TRANSACTION_SETUP_CONFIG_LEN);
for (i = 0; i < VPE_NUM_SCALER_TABLES; ++i) {
vpe_update_scale_coef(vpe_dev, (uint32_t *)iter);
iter += VPE_SCALER_CONFIG_LEN;
}
vpe_input_plane_config(vpe_dev, (uint32_t *)iter);
iter += VPE_INPUT_PLANE_CFG_LEN;
vpe_output_plane_config(vpe_dev, (uint32_t *)iter);
iter += VPE_OUTPUT_PLANE_CFG_LEN;
vpe_operation_config(vpe_dev, (uint32_t *)iter);
}
static int msm_vpe_send_frame_to_hardware(struct vpe_device *vpe_dev,
struct msm_queue_cmd *frame_qcmd)
{
struct msm_vpe_frame_info_t *process_frame;
if (vpe_dev->processing_q.len < MAX_VPE_PROCESSING_FRAME) {
process_frame = frame_qcmd->command;
msm_enqueue(&vpe_dev->processing_q,
&frame_qcmd->list_frame);
vpe_update_scaler_params(vpe_dev, process_frame->strip_info);
vpe_program_buffer_addresses(
vpe_dev,
process_frame->src_phyaddr,
process_frame->src_phyaddr
+ process_frame->src_chroma_plane_offset,
process_frame->dest_phyaddr,
process_frame->dest_phyaddr
+ process_frame->dest_chroma_plane_offset);
vpe_start(vpe_dev);
do_gettimeofday(&(process_frame->in_time));
}
return 0;
}
/*
 * msm_vpe_cfg() - handle VIDIOC_MSM_VPE_CFG: process one frame.
 *
 * Copies the frame descriptor from userspace, resolves the input buffer,
 * obtains an output buffer from the buffer manager, and hands the frame
 * to the hardware.  On failure every resource acquired so far is
 * released in reverse order via the labelled cleanup path.
 *
 * Returns 0 on success or a negative errno.
 */
static int msm_vpe_cfg(struct vpe_device *vpe_dev,
	struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
{
	int rc = 0;
	struct msm_queue_cmd *frame_qcmd = NULL;
	struct msm_vpe_frame_info_t *new_frame =
		kzalloc(sizeof(struct msm_vpe_frame_info_t), GFP_KERNEL);
	unsigned long in_phyaddr, out_phyaddr;
	struct msm_buf_mngr_info buff_mgr_info;
	if (!new_frame) {
		pr_err("Insufficient memory. return\n");
		return -ENOMEM;
	}
	rc = copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
		sizeof(struct msm_vpe_frame_info_t));
	if (rc) {
		pr_err("%s:%d copy from user\n", __func__, __LINE__);
		rc = -EINVAL;
		goto err_free_new_frame;
	}
	/* identity packs session (high 16 bits) and stream (low 16 bits). */
	in_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
		&new_frame->input_buffer_info,
		((new_frame->identity >> 16) & 0xFFFF),
		(new_frame->identity & 0xFFFF));
	if (!in_phyaddr) {
		pr_err("error gettting input physical address\n");
		rc = -EINVAL;
		goto err_free_new_frame;
	}
	/* Ask the buffer manager for a free output buffer for this stream. */
	memset(&new_frame->output_buffer_info, 0,
		sizeof(struct msm_vpe_buffer_info_t));
	memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
	buff_mgr_info.session_id = ((new_frame->identity >> 16) & 0xFFFF);
	buff_mgr_info.stream_id = (new_frame->identity & 0xFFFF);
	rc = msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
		&buff_mgr_info);
	if (rc < 0) {
		pr_err("error getting buffer\n");
		rc = -EINVAL;
		goto err_free_new_frame;
	}
	new_frame->output_buffer_info.index = buff_mgr_info.index;
	out_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
		&new_frame->output_buffer_info,
		((new_frame->identity >> 16) & 0xFFFF),
		(new_frame->identity & 0xFFFF));
	if (!out_phyaddr) {
		pr_err("error gettting output physical address\n");
		rc = -EINVAL;
		goto err_put_buf;
	}
	new_frame->src_phyaddr = in_phyaddr;
	new_frame->dest_phyaddr = out_phyaddr;
	/* Wrap the frame in a queue command; ownership passes to the
	 * processing queue once the hardware accepts it. */
	frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
	if (!frame_qcmd) {
		pr_err("Insufficient memory. return\n");
		rc = -ENOMEM;
		goto err_put_buf;
	}
	atomic_set(&frame_qcmd->on_heap, 1);
	frame_qcmd->command = new_frame;
	rc = msm_vpe_send_frame_to_hardware(vpe_dev, frame_qcmd);
	if (rc < 0) {
		pr_err("error cannot send frame to hardware\n");
		rc = -EINVAL;
		goto err_free_frame_qcmd;
	}
	return rc;
err_free_frame_qcmd:
	kfree(frame_qcmd);
err_put_buf:
	msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
		&buff_mgr_info);
err_free_new_frame:
	kfree(new_frame);
	return rc;
}
/*
 * msm_vpe_subdev_ioctl() - core ioctl handler for the VPE subdev.
 *
 * Serialized on vpe_dev->mutex.  Handles transaction setup, frame
 * processing, stream buffer enqueue/dequeue and event payload retrieval.
 *
 * Fixes over the original: sizeof() is printed with %zu (passing a
 * size_t where %d expects int is undefined varargs behavior on 64-bit),
 * and a failed copy_from_user() in the TRANSACTION_SETUP case now
 * returns -EFAULT instead of the positive bytes-not-copied count.
 *
 * NOTE(review): commands with no case here fall out of the switch and
 * return 0 - confirm whether -ENOIOCTLCMD is expected instead.
 */
static long msm_vpe_subdev_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
	struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
	int rc = 0;
	mutex_lock(&vpe_dev->mutex);
	switch (cmd) {
	case VIDIOC_MSM_VPE_TRANSACTION_SETUP: {
		struct msm_vpe_transaction_setup_cfg *cfg;
		VPE_DBG("VIDIOC_MSM_VPE_TRANSACTION_SETUP\n");
		if (sizeof(*cfg) != ioctl_ptr->len) {
			/* sizeof yields size_t: print with %zu. */
			pr_err("%s: size mismatch cmd=%d, len=%d, expected=%zu",
				__func__, cmd, ioctl_ptr->len,
				sizeof(*cfg));
			rc = -EINVAL;
			break;
		}
		cfg = kzalloc(ioctl_ptr->len, GFP_KERNEL);
		if (!cfg) {
			pr_err("%s:%d: malloc error\n", __func__, __LINE__);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		rc = copy_from_user(cfg, (void __user *)ioctl_ptr->ioctl_ptr,
			ioctl_ptr->len);
		if (rc) {
			pr_err("%s:%d copy from user\n", __func__, __LINE__);
			kfree(cfg);
			/* copy_from_user() returns the number of bytes not
			 * copied; do not leak that positive count. */
			rc = -EFAULT;
			break;
		}
		msm_vpe_transaction_setup(vpe_dev, (void *)cfg);
		kfree(cfg);
		break;
	}
	case VIDIOC_MSM_VPE_CFG: {
		VPE_DBG("VIDIOC_MSM_VPE_CFG\n");
		rc = msm_vpe_cfg(vpe_dev, ioctl_ptr);
		break;
	}
	case VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO: {
		struct msm_vpe_stream_buff_info_t *u_stream_buff_info;
		struct msm_vpe_stream_buff_info_t k_stream_buff_info;
		VPE_DBG("VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO\n");
		if (sizeof(struct msm_vpe_stream_buff_info_t) !=
			ioctl_ptr->len) {
			pr_err("%s:%d: invalid length\n", __func__, __LINE__);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
		if (!u_stream_buff_info) {
			pr_err("%s:%d: malloc error\n", __func__, __LINE__);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		rc = (copy_from_user(u_stream_buff_info,
			(void __user *)ioctl_ptr->ioctl_ptr,
			ioctl_ptr->len) ? -EFAULT : 0);
		if (rc) {
			pr_err("%s:%d copy from user\n", __func__, __LINE__);
			kfree(u_stream_buff_info);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		/* Bound the userspace-supplied count before using it to
		 * size the kernel copy of the buffer list. */
		if ((u_stream_buff_info->num_buffs == 0) ||
			(u_stream_buff_info->num_buffs >
				MSM_CAMERA_MAX_STREAM_BUF)) {
			pr_err("%s:%d: Invalid number of buffers\n", __func__,
				__LINE__);
			kfree(u_stream_buff_info);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
		k_stream_buff_info.identity = u_stream_buff_info->identity;
		k_stream_buff_info.buffer_info =
			kzalloc(k_stream_buff_info.num_buffs *
			sizeof(struct msm_vpe_buffer_info_t), GFP_KERNEL);
		if (!k_stream_buff_info.buffer_info) {
			pr_err("%s:%d: malloc error\n", __func__, __LINE__);
			kfree(u_stream_buff_info);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		rc = (copy_from_user(k_stream_buff_info.buffer_info,
			(void __user *)u_stream_buff_info->buffer_info,
			k_stream_buff_info.num_buffs *
			sizeof(struct msm_vpe_buffer_info_t)) ?
			-EFAULT : 0);
		if (rc) {
			pr_err("%s:%d copy from user\n", __func__, __LINE__);
			kfree(k_stream_buff_info.buffer_info);
			kfree(u_stream_buff_info);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		rc = msm_vpe_add_buff_queue_entry(vpe_dev,
			((k_stream_buff_info.identity >> 16) & 0xFFFF),
			(k_stream_buff_info.identity & 0xFFFF));
		if (!rc)
			rc = msm_vpe_enqueue_buff_info_list(vpe_dev,
				&k_stream_buff_info);
		kfree(k_stream_buff_info.buffer_info);
		kfree(u_stream_buff_info);
		break;
	}
	case VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO: {
		uint32_t identity;
		struct msm_vpe_buff_queue_info_t *buff_queue_info;
		VPE_DBG("VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO\n");
		if (ioctl_ptr->len != sizeof(uint32_t)) {
			pr_err("%s:%d Invalid len\n", __func__, __LINE__);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		rc = (copy_from_user(&identity,
			(void __user *)ioctl_ptr->ioctl_ptr,
			ioctl_ptr->len) ? -EFAULT : 0);
		if (rc) {
			pr_err("%s:%d copy from user\n", __func__, __LINE__);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
			((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
		if (buff_queue_info == NULL) {
			pr_err("error finding buffer queue entry for identity:%d\n",
				identity);
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		msm_vpe_dequeue_buff_info_list(vpe_dev, buff_queue_info);
		rc = msm_vpe_free_buff_queue_entry(vpe_dev,
			buff_queue_info->session_id,
			buff_queue_info->stream_id);
		break;
	}
	case VIDIOC_MSM_VPE_GET_EVENTPAYLOAD: {
		struct msm_device_queue *queue = &vpe_dev->eventData_q;
		struct msm_queue_cmd *event_qcmd;
		struct msm_vpe_frame_info_t *process_frame;
		VPE_DBG("VIDIOC_MSM_VPE_GET_EVENTPAYLOAD\n");
		event_qcmd = msm_dequeue(queue, list_eventdata);
		if (NULL == event_qcmd)
			break;
		/* Hand the completed frame descriptor back to userspace and
		 * free it - ownership ends here. */
		process_frame = event_qcmd->command;
		VPE_DBG("fid %d\n", process_frame->frame_id);
		if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
				process_frame,
				sizeof(struct msm_vpe_frame_info_t))) {
			mutex_unlock(&vpe_dev->mutex);
			return -EINVAL;
		}
		kfree(process_frame);
		kfree(event_qcmd);
		break;
	}
	}
	mutex_unlock(&vpe_dev->mutex);
	return rc;
}
/* Subscribe this file handle to VPE events, with a MAX_VPE_V4l2_EVENTS
 * deep per-handle event queue. */
static int msm_vpe_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, MAX_VPE_V4l2_EVENTS);
}

/* Mirror of msm_vpe_subscribe_event(): drop an event subscription. */
static int msm_vpe_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

/* Core ops exposed through the v4l2 subdev framework. */
static struct v4l2_subdev_core_ops msm_vpe_subdev_core_ops = {
	.ioctl = msm_vpe_subdev_ioctl,
	.subscribe_event = msm_vpe_subscribe_event,
	.unsubscribe_event = msm_vpe_unsubscribe_event,
};

static const struct v4l2_subdev_ops msm_vpe_subdev_ops = {
	.core = &msm_vpe_subdev_core_ops,
};

/* Populated at probe time from v4l2_subdev_fops, with our own
 * unlocked_ioctl hook installed (see vpe_probe()). */
static struct v4l2_file_operations msm_vpe_v4l2_subdev_fops;
/*
 * msm_vpe_subdev_do_ioctl() - fops-level ioctl dispatcher.
 *
 * Handles event dequeue/subscription and GET_INST_INFO directly; every
 * other command is forwarded to the subdev core ioctl.
 */
static long msm_vpe_subdev_do_ioctl(
	struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;

	switch (cmd) {
	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;
		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
	case VIDIOC_MSM_VPE_GET_INST_INFO: {
		uint32_t i;
		struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
		struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
		struct msm_vpe_frame_info_t inst_info;
		memset(&inst_info, 0, sizeof(struct msm_vpe_frame_info_t));
		/* Report the subscription slot owned by this handle
		 * (inst_id stays 0 if none matches). */
		for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
			if (vpe_dev->vpe_subscribe_list[i].vfh == vfh) {
				inst_info.inst_id = i;
				break;
			}
		}
		if (copy_to_user(
			(void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
			sizeof(struct msm_vpe_frame_info_t))) {
			return -EINVAL;
		}
		/* The original was missing this break and fell through to
		 * the default branch, re-dispatching GET_INST_INFO to the
		 * subdev ioctl (which ignores it). */
		break;
	}
	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}
	return 0;
}
/* fops unlocked_ioctl: let video_usercopy() marshal the argument to/from
 * userspace, then dispatch to msm_vpe_subdev_do_ioctl(). */
static long msm_vpe_subdev_fops_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	return video_usercopy(file, cmd, arg, msm_vpe_subdev_do_ioctl);
}
static int vpe_register_domain(void)
{
struct msm_iova_partition vpe_iommu_partition = {
/* TODO: verify that these are correct? */
.start = SZ_128K,
.size = SZ_2G - SZ_128K,
};
struct msm_iova_layout vpe_iommu_layout = {
.partitions = &vpe_iommu_partition,
.npartitions = 1,
.client_name = "camera_vpe",
.domain_flags = 0,
};
return msm_register_domain(&vpe_iommu_layout);
}
/*
 * vpe_probe() - platform driver probe.
 *
 * Allocates the device, registers the v4l2 subdev, sets up the IOMMU
 * domain and context banks, performs a one-time hardware reset while in
 * VPE_STATE_BOOT, then leaves the device powered off until first open.
 *
 * NOTE(review): err_release_mem calls release_mem_region() although no
 * request_mem_region() is made in this function - confirm ownership of
 * the region (vpe_device_remove() does the same).
 * NOTE(review): media_entity_init() and msm_sd_register() results are
 * not checked.
 */
static int __devinit vpe_probe(struct platform_device *pdev)
{
	struct vpe_device *vpe_dev;
	int rc = 0;
	vpe_dev = kzalloc(sizeof(struct vpe_device), GFP_KERNEL);
	if (!vpe_dev) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	vpe_dev->vpe_clk = kzalloc(sizeof(struct clk *) *
		ARRAY_SIZE(vpe_clk_info), GFP_KERNEL);
	if (!vpe_dev->vpe_clk) {
		pr_err("not enough memory\n");
		rc = -ENOMEM;
		goto err_free_vpe_dev;
	}
	v4l2_subdev_init(&vpe_dev->msm_sd.sd, &msm_vpe_subdev_ops);
	vpe_dev->msm_sd.sd.internal_ops = &msm_vpe_internal_ops;
	snprintf(vpe_dev->msm_sd.sd.name, ARRAY_SIZE(vpe_dev->msm_sd.sd.name),
		"vpe");
	vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
	v4l2_set_subdevdata(&vpe_dev->msm_sd.sd, vpe_dev);
	platform_set_drvdata(pdev, &vpe_dev->msm_sd.sd);
	mutex_init(&vpe_dev->mutex);
	spin_lock_init(&vpe_dev->tasklet_lock);
	vpe_dev->pdev = pdev;
	/* Register block and interrupt line come from the board resources. */
	vpe_dev->mem = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "vpe");
	if (!vpe_dev->mem) {
		pr_err("no mem resource?\n");
		rc = -ENODEV;
		goto err_free_vpe_clk;
	}
	vpe_dev->irq = platform_get_resource_byname(pdev,
		IORESOURCE_IRQ, "vpe");
	if (!vpe_dev->irq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto err_release_mem;
	}
	vpe_dev->domain_num = vpe_register_domain();
	if (vpe_dev->domain_num < 0) {
		pr_err("%s: could not register domain\n", __func__);
		rc = -ENODEV;
		goto err_release_mem;
	}
	vpe_dev->domain =
		msm_get_iommu_domain(vpe_dev->domain_num);
	if (!vpe_dev->domain) {
		pr_err("%s: cannot find domain\n", __func__);
		rc = -ENODEV;
		goto err_release_mem;
	}
	vpe_dev->iommu_ctx_src = msm_iommu_get_ctx("vpe_src");
	vpe_dev->iommu_ctx_dst = msm_iommu_get_ctx("vpe_dst");
	if (!vpe_dev->iommu_ctx_src || !vpe_dev->iommu_ctx_dst) {
		pr_err("%s: cannot get iommu_ctx\n", __func__);
		rc = -ENODEV;
		goto err_release_mem;
	}
	media_entity_init(&vpe_dev->msm_sd.sd.entity, 0, NULL, 0);
	vpe_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
	vpe_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_VPE;
	vpe_dev->msm_sd.sd.entity.name = pdev->name;
	msm_sd_register(&vpe_dev->msm_sd);
	/* Clone the stock subdev fops but install our own ioctl hook. */
	msm_vpe_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
	msm_vpe_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
	msm_vpe_v4l2_subdev_fops.unlocked_ioctl = msm_vpe_subdev_fops_ioctl;
	msm_vpe_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
	msm_vpe_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
	vpe_dev->msm_sd.sd.devnode->fops = &msm_vpe_v4l2_subdev_fops;
	vpe_dev->msm_sd.sd.entity.revision = vpe_dev->msm_sd.sd.devnode->num;
	/* One-time power-up/reset cycle; BOOT state skips irq request. */
	vpe_dev->state = VPE_STATE_BOOT;
	rc = vpe_init_hardware(vpe_dev);
	if (rc < 0) {
		pr_err("%s: Couldn't init vpe hardware\n", __func__);
		rc = -ENODEV;
		goto err_unregister_sd;
	}
	vpe_reset(vpe_dev);
	vpe_release_hardware(vpe_dev);
	vpe_dev->state = VPE_STATE_OFF;
	rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
	if (rc < 0) {
		pr_err("Couldn't attach to vpe_src context bank\n");
		rc = -ENODEV;
		goto err_unregister_sd;
	}
	rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
	if (rc < 0) {
		pr_err("Couldn't attach to vpe_dst context bank\n");
		rc = -ENODEV;
		goto err_detach_src;
	}
	vpe_dev->state = VPE_STATE_OFF;
	msm_queue_init(&vpe_dev->eventData_q, "vpe-eventdata");
	msm_queue_init(&vpe_dev->processing_q, "vpe-frame");
	INIT_LIST_HEAD(&vpe_dev->tasklet_q);
	tasklet_init(&vpe_dev->vpe_tasklet, msm_vpe_do_tasklet,
		(unsigned long)vpe_dev);
	vpe_dev->vpe_open_cnt = 0;
	return rc;
err_detach_src:
	iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
err_unregister_sd:
	msm_sd_unregister(&vpe_dev->msm_sd);
err_release_mem:
	release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
err_free_vpe_clk:
	kfree(vpe_dev->vpe_clk);
err_free_vpe_dev:
	kfree(vpe_dev);
	return rc;
}
/*
 * Platform-driver remove hook: tears down the VPE subdevice created by
 * the probe path (IOMMU attachments, v4l2 subdev, MMIO region) and
 * frees the device structure.  Always returns 0.
 */
static int vpe_device_remove(struct platform_device *dev)
{
	struct v4l2_subdev *sd = platform_get_drvdata(dev);
	struct vpe_device *vpe_dev;
	if (!sd) {
		pr_err("%s: Subdevice is NULL\n", __func__);
		return 0;
	}
	vpe_dev = (struct vpe_device *)v4l2_get_subdevdata(sd);
	if (!vpe_dev) {
		pr_err("%s: vpe device is NULL\n", __func__);
		return 0;
	}
	/* Detach in reverse order of the probe-time attachments. */
	iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
	iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
	msm_sd_unregister(&vpe_dev->msm_sd);
	release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
	mutex_destroy(&vpe_dev->mutex);
	/* NOTE(review): vpe_dev->vpe_clk is kfree'd on the probe error path
	 * but not here -- confirm whether remove leaks the clk array. */
	kfree(vpe_dev);
	return 0;
}
/* Platform-driver glue binding vpe_probe()/vpe_device_remove() to the
 * MSM VPE platform device. */
static struct platform_driver vpe_driver = {
	.probe = vpe_probe,
	.remove = __devexit_p(vpe_device_remove),
	.driver = {
		.name = MSM_VPE_DRV_NAME,
		.owner = THIS_MODULE,
	},
};
/* Module init: register the VPE platform driver. */
static int __init msm_vpe_init_module(void)
{
	return platform_driver_register(&vpe_driver);
}
/* Module exit: unregister the VPE platform driver. */
static void __exit msm_vpe_exit_module(void)
{
	platform_driver_unregister(&vpe_driver);
}
/* Standard module registration boilerplate. */
module_init(msm_vpe_init_module);
module_exit(msm_vpe_exit_module);
MODULE_DESCRIPTION("MSM VPE driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
charles1018/kernel_blu_spark | arch/arm/mach-msm/qdsp5v2/adsp_driver.c | 374 | 13154 | /*
* Copyright (C) 2008 Google, Inc.
* Copyright (c) 2009, The Linux Foundation. All rights reserved.
* Author: Iliyan Malchev <ibm@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/msm_adsp.h>
#include <linux/export.h>
#include "adsp.h"
#include <mach/debug_mm.h>
#include <linux/slab.h>
/* Userspace descriptor for registering a pmem buffer: the pmem fd plus
 * the user virtual address clients will reference it by. */
struct adsp_pmem_info {
	int fd;
	void *vaddr;
};
/* One registered pmem mapping, linked on the module's pmem_regions
 * hlist.  Holds the user-virtual, physical and kernel-virtual bases so
 * addresses embedded in commands/events can be translated. */
struct adsp_pmem_region {
	struct hlist_node list;
	void *vaddr;		/* userspace virtual base */
	unsigned long paddr;	/* physical base */
	unsigned long kvaddr;	/* kernel virtual base */
	unsigned long len;
	struct file *file;	/* backing pmem file */
};
/* Per-module character-device state for /dev/adsp_* nodes. */
struct adsp_device {
	struct msm_adsp_module *module;	/* bound in open(), NULLed in release() */
	spinlock_t event_queue_lock;	/* protects event_queue */
	wait_queue_head_t event_wait;	/* readers block here in GET_EVENT */
	struct list_head event_queue;	/* pending struct adsp_event entries */
	int abort;			/* forces blocked readers out with -ENODEV */
	const char *name;
	struct device *device;
	struct cdev cdev;
};
static struct adsp_device *inode_to_device(struct inode *inode);

/*
 * Range-arithmetic helpers for pmem regions.  Arguments are captured in
 * local temporaries so each is evaluated exactly once.
 */

/* True when the range [v, v+l) lies entirely inside region r. */
#define __CONTAINS(r, v, l) ({ \
	typeof(r) __r = r; \
	typeof(v) __v = v; \
	typeof(v) __e = __v + l; \
	int res = __v >= __r->vaddr && \
		__e <= __r->vaddr + __r->len; \
	res; \
})

/* True when region r2 is fully contained in region r1. */
#define CONTAINS(r1, r2) ({ \
	typeof(r2) __r2 = r2; \
	__CONTAINS(r1, __r2->vaddr, __r2->len); \
})

/* True when address v falls inside region r. */
#define IN_RANGE(r, v) ({ \
	typeof(r) __r = r; \
	typeof(v) __vv = v; \
	int res = ((__vv >= __r->vaddr) && \
		(__vv < (__r->vaddr + __r->len))); \
	res; \
})

/* True when either endpoint of r2 falls inside r1.
 * NOTE(review): this misses the case where r1 lies strictly inside r2
 * -- confirm callers never depend on that case. */
#define OVERLAPS(r1, r2) ({ \
	typeof(r1) __r1 = r1; \
	typeof(r2) __r2 = r2; \
	typeof(__r2->vaddr) __v = __r2->vaddr; \
	typeof(__v) __e = __v + __r2->len - 1; \
	int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \
	res; \
})
/*
 * Resolve a userspace range [vaddr, vaddr+len) to its registered pmem
 * region.  On success *region points at the (first) matching region and
 * 0 is returned; -1 means no region covers the whole range.  Multiple
 * matches are logged but the first match still wins.
 */
static int adsp_pmem_lookup_vaddr(struct msm_adsp_module *module, void **addr,
		unsigned long len, struct adsp_pmem_region **region)
{
	struct hlist_node *node;
	void *vaddr = *addr;
	struct adsp_pmem_region *region_elt;
	int match_count = 0;

	*region = NULL;

	/* returns physical address or zero */
	hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) {
		if (vaddr >= region_elt->vaddr &&
		    vaddr < region_elt->vaddr + region_elt->len &&
		    vaddr + len <= region_elt->vaddr + region_elt->len) {
			/* offset since we could pass vaddr inside a registered
			 * pmem buffer
			 */
			match_count++;
			if (!*region)
				*region = region_elt;
		}
	}

	if (match_count > 1) {
		MM_ERR("module %s: "
			"multiple hits for vaddr %p, len %ld\n",
			module->name, vaddr, len);
		/* Dump every matching region to aid debugging. */
		hlist_for_each_entry(region_elt, node,
				&module->pmem_regions, list) {
			if (vaddr >= region_elt->vaddr &&
			    vaddr < region_elt->vaddr + region_elt->len &&
			    vaddr + len <= region_elt->vaddr + region_elt->len)
				MM_ERR("%p, %ld --> %p\n",
					region_elt->vaddr,
					region_elt->len,
					(void *)region_elt->paddr);
		}
	}

	return *region ? 0 : -1;
}
/*
 * Patch a user virtual address embedded in a command in place: *addr is
 * overwritten (through the paddr alias) with the physical address, and
 * the matching kernel virtual address is returned via *kvaddr.
 * Returns 0 on success, -1 when the address is not registered.
 */
int adsp_pmem_fixup_kvaddr(struct msm_adsp_module *module, void **addr,
			   unsigned long *kvaddr, unsigned long len)
{
	struct adsp_pmem_region *region;
	void *vaddr = *addr;
	unsigned long *paddr = (unsigned long *)addr;	/* alias for in-place patch */
	int ret;

	ret = adsp_pmem_lookup_vaddr(module, addr, len, &region);
	if (ret) {
		MM_ERR("not patching %s (paddr & kvaddr),"
			" lookup (%p, %ld) failed\n",
			module->name, vaddr, len);
		return ret;
	}
	/* Apply vaddr's offset within the region to both bases. */
	*paddr = region->paddr + (vaddr - region->vaddr);
	*kvaddr = region->kvaddr + (vaddr - region->vaddr);
	return 0;
}
/*
 * Same as adsp_pmem_fixup_kvaddr() but only rewrites *addr with the
 * physical address (no kernel-virtual output).
 * Returns 0 on success, -1 when the address is not registered.
 */
int adsp_pmem_fixup(struct msm_adsp_module *module, void **addr,
		    unsigned long len)
{
	struct adsp_pmem_region *region;
	void *vaddr = *addr;
	unsigned long *paddr = (unsigned long *)addr;	/* alias for in-place patch */
	int ret;

	ret = adsp_pmem_lookup_vaddr(module, addr, len, &region);
	if (ret) {
		MM_ERR("not patching %s, lookup (%p, %ld) failed\n",
			module->name, vaddr, len);
		return ret;
	}

	*paddr = region->paddr + (vaddr - region->vaddr);
	return 0;
}
/*
 * Validate an outgoing command packet via the module's optional
 * verifier hook.  Modules without a verifier accept everything
 * (logged once per call).
 */
static int adsp_verify_cmd(struct msm_adsp_module *module,
			   unsigned int queue_id, void *cmd_data,
			   size_t cmd_size)
{
	if (!module->verify_cmd) {
		MM_INFO("no packet verifying function "
			"for task %s\n", module->name);
		return 0;
	}

	return module->verify_cmd(module, queue_id, cmd_data, cmd_size);
}
/*
 * ADSP_IOCTL_WRITE_COMMAND handler: copy the command descriptor and its
 * payload from userspace, verify the packet under the pmem-region lock
 * (verification may translate embedded pmem addresses), then queue it
 * to the ADSP.  Returns 0 or a negative errno.
 */
static long adsp_write_cmd(struct adsp_device *adev, void __user *arg)
{
	struct adsp_command_t cmd;
	unsigned char buf[256];
	void *cmd_data;
	long rc;

	if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd)))
		return -EFAULT;

	/* Small packets use the on-stack buffer, larger ones the heap. */
	if (cmd.len > 256) {
		cmd_data = kmalloc(cmd.len, GFP_USER);
		if (!cmd_data)
			return -ENOMEM;
	} else {
		cmd_data = buf;
	}

	if (copy_from_user(cmd_data, (void __user *)(cmd.data), cmd.len)) {
		/*
		 * Bug fix: the original jumped to the unlock path here,
		 * releasing pmem_regions_lock even though it had not been
		 * taken yet.  Skip straight to the free-only path.
		 */
		rc = -EFAULT;
		goto free_out;
	}

	mutex_lock(&adev->module->pmem_regions_lock);
	if (adsp_verify_cmd(adev->module, cmd.queue, cmd_data, cmd.len)) {
		MM_ERR("module %s: verify failed.\n", adev->module->name);
		rc = -EINVAL;
		goto end;
	}
	rc = msm_adsp_write(adev->module, cmd.queue, cmd_data, cmd.len);
end:
	mutex_unlock(&adev->module->pmem_regions_lock);
free_out:
	if (cmd.len > 256)
		kfree(cmd_data);
	return rc;
}
/*
 * Wait condition for adsp_get_event(): true when the event queue is
 * non-empty or an abort has been requested.
 */
static int adsp_events_pending(struct adsp_device *adev)
{
	unsigned long flags;
	int pending;

	spin_lock_irqsave(&adev->event_queue_lock, flags);
	pending = !list_empty(&adev->event_queue);
	spin_unlock_irqrestore(&adev->event_queue_lock, flags);

	return adev->abort || pending;
}
/*
 * Find the registered pmem region containing the physical address held
 * in *addr.  Stores the region in *region and returns 0 on success,
 * or -1 when no region covers the address.
 */
static int adsp_pmem_lookup_paddr(struct msm_adsp_module *module, void **addr,
		struct adsp_pmem_region **region)
{
	struct hlist_node *node;
	struct adsp_pmem_region *r;
	unsigned long target = (unsigned long)(*addr);

	hlist_for_each_entry(r, node, &module->pmem_regions, list) {
		unsigned long start = r->paddr;
		unsigned long limit = r->paddr + r->len;

		if (target >= start && target < limit) {
			*region = r;
			return 0;
		}
	}

	return -1;
}
/*
 * Reverse translation used when patching DSP events: *addr contains a
 * physical address on entry and is rewritten (via the vaddr alias) with
 * the corresponding user virtual address.  Returns 0 or -1.
 */
int adsp_pmem_paddr_fixup(struct msm_adsp_module *module, void **addr)
{
	struct adsp_pmem_region *region;
	unsigned long paddr = (unsigned long)(*addr);
	unsigned long *vaddr = (unsigned long *)addr;	/* alias for in-place patch */
	int ret;

	ret = adsp_pmem_lookup_paddr(module, addr, &region);
	if (ret) {
		MM_ERR("not patching %s, paddr %p lookup failed\n",
			module->name, vaddr);
		return ret;
	}

	*vaddr = (unsigned long)region->vaddr + (paddr - region->paddr);
	return 0;
}
/*
 * Run the module's optional event patcher (translates physical
 * addresses in DSP messages back to user addresses).  No-op when the
 * module does not provide one.
 */
static int adsp_patch_event(struct msm_adsp_module *module,
			    struct adsp_event *event)
{
	if (!module->patch_event)
		return 0;

	return module->patch_event(module, event);
}
/*
 * ADSP_IOCTL_GET_EVENT handler: wait (optionally with a timeout) for a
 * queued event, patch physical addresses in DSP messages, and copy the
 * payload plus descriptor back to userspace.
 *
 * Returns 0 on success, or -EFAULT/-ETIMEDOUT/-ENODEV/-EAGAIN/
 * -ETOOSMALL on the corresponding failure.
 */
static long adsp_get_event(struct adsp_device *adev, void __user *arg)
{
	unsigned long flags;
	struct adsp_event *data = NULL;
	struct adsp_event_t evt;
	int timeout;
	long rc = 0;

	if (copy_from_user(&evt, arg, sizeof(struct adsp_event_t)))
		return -EFAULT;

	timeout = (int)evt.timeout_ms;
	if (timeout > 0) {
		rc = wait_event_interruptible_timeout(
			adev->event_wait, adsp_events_pending(adev),
			msecs_to_jiffies(timeout));
		if (rc == 0)
			return -ETIMEDOUT;
	} else {
		/* timeout_ms <= 0 means wait indefinitely */
		rc = wait_event_interruptible(
			adev->event_wait, adsp_events_pending(adev));
	}
	if (rc < 0)
		return rc;

	/* A requested abort (ABORT_EVENT_READ ioctl) wins over events. */
	if (adev->abort)
		return -ENODEV;

	spin_lock_irqsave(&adev->event_queue_lock, flags);
	if (!list_empty(&adev->event_queue)) {
		data = list_first_entry(&adev->event_queue,
					struct adsp_event, list);
		list_del(&data->list);
	}
	spin_unlock_irqrestore(&adev->event_queue_lock, flags);

	if (!data)
		return -EAGAIN;

	/* DSP messages are type 0; they may contain physical addresses */
	if (data->type == 0)
		adsp_patch_event(adev->module, data);

	/* map adsp_event --> adsp_event_t */
	if (evt.len < data->size) {
		/* NOTE(review): the event is freed below even on
		 * -ETOOSMALL, so it is dropped rather than re-queued. */
		rc = -ETOOSMALL;
		goto end;
	}
	/* 16-bit payload for DSP messages, 32-bit for ARM9 events. */
	if (data->msg_id != EVENT_MSG_ID) {
		if (copy_to_user((void *)(evt.data), data->data.msg16,
					data->size)) {
			rc = -EFAULT;
			goto end;
		}
	} else {
		if (copy_to_user((void *)(evt.data), data->data.msg32,
					data->size)) {
			rc = -EFAULT;
			goto end;
		}
	}

	evt.type = data->type; /* 0 --> from aDSP, 1 --> from ARM9 */
	evt.msg_id = data->msg_id;
	evt.flags = data->is16;
	evt.len = data->size;
	if (copy_to_user(arg, &evt, sizeof(evt)))
		rc = -EFAULT;
end:
	kfree(data);
	return rc;
}
/*
 * ioctl dispatcher for /dev/adsp_* nodes.  Note that commands that only
 * "break" out of the switch (DISABLE_ACK, ABORT_EVENT_READ and unknown
 * commands) all fall through to the final -EINVAL, so even a successful
 * abort request reports -EINVAL to the caller.
 */
static long adsp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct adsp_device *adev = filp->private_data;

	switch (cmd) {
	case ADSP_IOCTL_ENABLE:
		return msm_adsp_enable(adev->module);
	case ADSP_IOCTL_DISABLE:
		return msm_adsp_disable(adev->module);
	case ADSP_IOCTL_DISABLE_EVENT_RSP:
		return msm_adsp_disable_event_rsp(adev->module);
	case ADSP_IOCTL_DISABLE_ACK:
		MM_ERR("ADSP_IOCTL_DISABLE_ACK is not implemented\n");
		break;
	case ADSP_IOCTL_WRITE_COMMAND:
		return adsp_write_cmd(adev, (void __user *) arg);
	case ADSP_IOCTL_GET_EVENT:
		return adsp_get_event(adev, (void __user *) arg);
	case ADSP_IOCTL_SET_CLKRATE: {
		unsigned long clk_rate;
		if (copy_from_user(&clk_rate, (void *) arg, sizeof(clk_rate)))
			return -EFAULT;
		return adsp_set_clkrate(adev->module, clk_rate);
	}
	case ADSP_IOCTL_ABORT_EVENT_READ:
		/* Kick any reader blocked in adsp_get_event(). */
		adev->abort = 1;
		wake_up(&adev->event_wait);
		break;
	default:
		break;
	}
	return -EINVAL;
}
/*
 * release() handler: drop our reference on the bound adsp module.
 */
static int adsp_release(struct inode *inode, struct file *filp)
{
	struct adsp_device *adev = filp->private_data;
	struct msm_adsp_module *module = adev->module;

	MM_INFO("release '%s'\n", adev->name);

	/* clear module before putting it to avoid race with open() */
	adev->module = NULL;
	msm_adsp_put(module);

	return 0;
}
/*
 * Event callback invoked by the adsp core (possibly from atomic
 * context): copy the payload into a freshly allocated adsp_event, queue
 * it, and wake any reader blocked in adsp_get_event().
 */
static void adsp_event(void *driver_data, unsigned id, size_t len,
			void (*getevent)(void *ptr, size_t len))
{
	struct adsp_device *adev = driver_data;
	struct adsp_event *event;
	unsigned long flags;

	if (len > ADSP_EVENT_MAX_SIZE) {
		/* Fix: len is a size_t, so print it with %zu -- the old
		 * %d specifier misformatted on 64-bit builds. */
		MM_ERR("event too large (%zu bytes)\n", len);
		return;
	}

	/* GFP_ATOMIC: this callback may run with interrupts disabled. */
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		MM_ERR("cannot allocate buffer\n");
		return;
	}

	if (id != EVENT_MSG_ID) {
		/* type 0: DSP message, copied into the 16-bit payload */
		event->type = 0;
		event->is16 = 0;
		event->msg_id = id;
		event->size = len;
		getevent(event->data.msg16, len);
	} else {
		/* type 1: ARM9 event, copied into the 32-bit payload */
		event->type = 1;
		event->is16 = 1;
		event->msg_id = id;
		event->size = len;
		getevent(event->data.msg32, len);
	}

	spin_lock_irqsave(&adev->event_queue_lock, flags);
	list_add_tail(&event->list, &adev->event_queue);
	spin_unlock_irqrestore(&adev->event_queue_lock, flags);
	wake_up(&adev->event_wait);
}
/* Callbacks handed to the adsp core by adsp_open(). */
static struct msm_adsp_ops adsp_ops = {
	.event = adsp_event,
};
/*
 * open() handler: bind the device node to its ADSP module via
 * msm_adsp_get() and (re)initialize the module's pmem-region list and
 * lock.  Returns 0 or a negative errno.
 */
static int adsp_open(struct inode *inode, struct file *filp)
{
	struct adsp_device *adev;
	int rc;

	rc = nonseekable_open(inode, filp);
	if (rc < 0)
		return rc;

	adev = inode_to_device(inode);
	if (!adev)
		return -ENODEV;

	MM_INFO("open '%s'\n", adev->name);

	rc = msm_adsp_get(adev->name, &adev->module, &adsp_ops, adev);
	if (rc)
		return rc;

	MM_INFO("opened module '%s' adev %p\n", adev->name, adev);
	filp->private_data = adev;
	adev->abort = 0;
	/* NOTE(review): region list and mutex are re-initialized on every
	 * successful open -- assumes msm_adsp_get() rejects concurrent
	 * opens of the same module; confirm in adsp.c. */
	INIT_HLIST_HEAD(&adev->module->pmem_regions);
	mutex_init(&adev->module->pmem_regions_lock);
	return 0;
}
/* Device table and count published by msm_adsp_publish_cdevs(). */
static unsigned adsp_device_count;
static struct adsp_device *adsp_devices;
/*
 * Map a character-device inode to its adsp_device: the minor number
 * indexes directly into the device table.  Returns NULL for minors out
 * of range or entries whose device node was never created.
 */
static struct adsp_device *inode_to_device(struct inode *inode)
{
	unsigned minor = MINOR(inode->i_rdev);

	if (minor >= adsp_device_count)
		return NULL;
	if (!adsp_devices[minor].device)
		return NULL;

	return &adsp_devices[minor];
}
static dev_t adsp_devno;	/* base of the allocated char-dev region */
static struct class *adsp_class;

/* File operations shared by every /dev/adsp_* node. */
static const struct file_operations adsp_fops = {
	.owner = THIS_MODULE,
	.open = adsp_open,
	.unlocked_ioctl = adsp_ioctl,
	.release = adsp_release,
};
/*
 * Create one device node plus its cdev for an adsp module.  Failures
 * are swallowed: the entry is simply left unpublished (adev->device
 * stays NULL, which inode_to_device() treats as "no such device").
 */
static void adsp_create(struct adsp_device *adev, const char *name,
			struct device *parent, dev_t devt)
{
	struct device *dev;
	int rc;

	dev = device_create(adsp_class, parent, devt, "%s", name);
	if (IS_ERR(dev))
		return;

	init_waitqueue_head(&adev->event_wait);
	INIT_LIST_HEAD(&adev->event_queue);
	spin_lock_init(&adev->event_queue_lock);

	cdev_init(&adev->cdev, &adsp_fops);
	adev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&adev->cdev, devt, 1);
	if (rc < 0) {
		device_destroy(adsp_class, devt);
	} else {
		adev->device = dev;
		adev->name = name;
	}
}
void msm_adsp_publish_cdevs(struct msm_adsp_module *modules, unsigned n)
{
int rc;
adsp_devices = kzalloc(sizeof(struct adsp_device) * n, GFP_KERNEL);
if (!adsp_devices)
return;
adsp_class = class_create(THIS_MODULE, "adsp");
if (IS_ERR(adsp_class))
goto fail_create_class;
rc = alloc_chrdev_region(&adsp_devno, 0, n, "adsp");
if (rc < 0)
goto fail_alloc_region;
adsp_device_count = n;
for (n = 0; n < adsp_device_count; n++) {
adsp_create(adsp_devices + n,
modules[n].name, &modules[n].pdev.dev,
MKDEV(MAJOR(adsp_devno), n));
}
return;
fail_alloc_region:
class_unregister(adsp_class);
fail_create_class:
kfree(adsp_devices);
}
| gpl-2.0 |
v1ron/mk802iv-linux | arch/x86/oprofile/nmi_int.c | 886 | 17391 | /**
* @file nmi_int.c
*
* @remark Copyright 2002-2009 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
* @author Robert Richter <robert.richter@amd.com>
* @author Barry Kasindorf <barry.kasindorf@amd.com>
* @author Jason Yeh <jason.yeh@amd.com>
* @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
*/
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include "op_counter.h"
#include "op_x86_model.h"
static struct op_x86_model_spec *model;	/* CPU-specific PMU model ops */
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);	/* LVTPC value before setup */

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;		/* per-CPU setup has been performed */
static int ctr_running;		/* counters are actively counting */

struct op_counter_config counter_config[OP_MAX_COUNTER];
/* common functions */
/*
 * Build the event-select MSR value for one counter from its user
 * configuration, constrained by the model's event mask.
 */
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;	/* interrupt on overflow */
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	/* Only INV, EDGE and CMASK bits may be set via "extra". */
	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	/* Bits 8-11 of the event code live at MSR bits 32-35. */
	val |= (u64)(event & 0x0F00) << 24;

	return val;
}
/*
 * NMI die-notifier: hand PMU NMIs to the model's overflow handler while
 * counters run; when profiling is set up but stopped, quiesce the
 * counters instead.  Returns NOTIFY_STOP when the NMI was ours.
 */
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_NMI:
		if (ctr_running)
			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
		else if (!nmi_enabled)
			break;	/* not ours: pass the NMI along */
		else
			model->stop(&__get_cpu_var(cpu_msrs));
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
/*
 * Snapshot the current counter and control MSR values so they can be
 * restored when profiling shuts down.
 */
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *ctr = msrs->counters;
	struct op_msr *ctl = msrs->controls;
	unsigned int idx;

	for (idx = 0; idx < model->num_counters; idx++)
		if (ctr[idx].addr)
			rdmsrl(ctr[idx].addr, ctr[idx].saved);

	for (idx = 0; idx < model->num_controls; idx++)
		if (ctl[idx].addr)
			rdmsrl(ctl[idx].addr, ctl[idx].saved);
}
/* IPI callback: start the counters on this CPU. */
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);	/* setup never ran on this CPU */
	else
		model->start(msrs);
}
/* oprofile "start" hook: begin counting on every online CPU. */
static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}
/* IPI callback: stop the counters on this CPU. */
static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);

	/* A missing control array means setup never ran on this CPU. */
	if (msrs->controls)
		model->stop(msrs);
	else
		WARN_ON_ONCE(1);
}
/* oprofile "stop" hook: stop counting on every online CPU. */
static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

/*
 * Counter multiplexing: expose more "virtual" counters than the
 * hardware has physical ones by periodically rotating which slice of
 * the virtual set drives the physical counters.
 */

static DEFINE_PER_CPU(int, switch_index);	/* first virtual counter in the active set */

/* Multiplexing is only possible when the model provides switch_ctrl. */
static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

/* Map a physical counter index to the currently active virtual one. */
inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}

/* Map a virtual counter index back onto its physical counter. */
inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}

/* Free all per-CPU multiplex state and reset the switch indices. */
static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}

/* Allocate per-CPU multiplex arrays.  Returns 1 on success, 0 on OOM;
 * partial allocations are cleaned up by the caller via free_msrs(). */
static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}

/* Seed each enabled virtual counter with -count so it overflows after
 * "count" events; disabled counters start at 0. */
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}

/* Park the live hardware counter values into their virtual slots. */
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

/* Load the virtual slot values back into the hardware counters. */
static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

/* IPI callback: advance this CPU to the next virtual counter set,
 * wrapping back to 0 when the next set is out of range or unused. */
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;

	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

/* oprofile switch_events hook: rotate counter sets on every CPU. */
static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}

/* Publish the switch hook only when the model can multiplex. */
static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}

/* Copy CPU 0's multiplex slots to another CPU (used during setup). */
static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}

#else

/* Multiplexing disabled: identity mappings and no-op hooks. */

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif
/*
 * Release the per-CPU counter/control arrays allocated by
 * allocate_msrs(), along with any multiplexing state.
 */
static void free_msrs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		kfree(per_cpu(cpu_msrs, cpu).counters);
		per_cpu(cpu_msrs, cpu).counters = NULL;

		kfree(per_cpu(cpu_msrs, cpu).controls);
		per_cpu(cpu_msrs, cpu).controls = NULL;
	}
	nmi_shutdown_mux();
}
/*
 * Allocate zeroed per-CPU counter/control arrays (plus multiplex
 * state).  Returns 1 on success; on failure everything already
 * allocated is released and 0 is returned.
 */
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}
/*
 * IPI callback: save current MSR state, program the counters and point
 * the local APIC's LVTPC entry at the NMI vector.
 */
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
	/* oprofilefs_lock serializes against userspace changing the
	 * counter configuration underneath us. */
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
/* Die-chain registration for profile_exceptions_notify() above. */
static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = NMI_LOCAL_LOW_PRIOR,
};
/*
 * Undo nmi_cpu_save_registers(): control MSRs are written back first,
 * then the counter MSRs.
 */
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}
/*
 * IPI callback: restore the saved LVTPC entry and MSR values, then let
 * the model perform any extra per-CPU teardown.
 */
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector nr combination can be illegal. That's by design: on
	 * power on apic lvt contain a zero vector nr which are legal only for
	 * NMI delivery mode. So inhibit apic err before restoring lvtpc
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
	if (model->cpu_down)
		model->cpu_down();
}
/* Hotplug helper: bring profiling up on a newly online CPU. */
static void nmi_cpu_up(void *dummy)
{
	if (nmi_enabled)
		nmi_cpu_setup(dummy);
	if (ctr_running)
		nmi_cpu_start(dummy);
}
/* Hotplug helper: quiesce profiling on a CPU about to go offline. */
static void nmi_cpu_down(void *dummy)
{
	if (ctr_running)
		nmi_cpu_stop(dummy);
	if (nmi_enabled)
		nmi_cpu_shutdown(dummy);
}
/*
 * Populate oprofilefs with one directory per available (virtual)
 * counter, each exposing that counter's configuration knobs.
 */
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace app.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
	}
	return 0;
}
/*
 * CPU-hotplug notifier: mirror online/offline transitions into the
 * per-CPU profiling state.  The down path waits for completion (last
 * argument 1) while the up path does not.
 */
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}
/* Hotplug registration for oprofile_cpu_notifier() above. */
static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
/*
 * oprofile "setup" hook: allocate and fill the per-CPU MSR tables,
 * install the NMI die-notifier and program every online CPU.
 * Returns 0 or a negative errno.
 */
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		/* Every other CPU shares CPU 0's MSR address layout. */
		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_die_notifier(&profile_exceptions_nb);
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
/*
 * oprofile "shutdown" hook: unwind nmi_setup() -- tear down per-CPU
 * state, drop both notifiers and free the MSR tables.
 */
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();
	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
#ifdef CONFIG_PM

/* Syscore suspend/resume: at this stage only one CPU is still running,
 * so a single stop/start is sufficient. */

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
	.resume = nmi_resume,
	.suspend = nmi_suspend,
};

static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

/* No power management: hooks compile away. */
static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */
/*
 * Select a Pentium 4 model, using the hyper-threaded variant when two
 * siblings share the core.  Returns 1 on success, 0 to fall back to
 * timer mode.
 */
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	/* Only P4 models 0-6, excluding 5, are supported. */
	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
if (!strcmp(str, "arch_perfmon")) {
force_arch_perfmon = 1;
printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
}
return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
/*
 * Select the P6-family model spec from the CPU model number; Core i7
 * class parts use the architectural perfmon spec instead.  Returns 1 on
 * success, 0 when the model is unknown or arch perfmon was forced.
 */
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}
/*
 * Entry point called by the oprofile core: identify the CPU, pick the
 * matching op_x86_model_spec and wire the NMI-based callbacks into
 * @ops.  Returns 0 on success or -ENODEV for unsupported hardware.
 */
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
/* Teardown counterpart of op_nmi_init(). */
void op_nmi_exit(void)
{
	exit_suspend_resume();
}
| gpl-2.0 |
bigbiff/i717-GB-Kernel | fs/signalfd.c | 886 | 7365 | /*
* fs/signalfd.c
*
* Copyright (C) 2003 Linus Torvalds
*
* Mon Mar 5, 2007: Davide Libenzi <davidel@xmailserver.org>
* Changed ->read() to return a siginfo strcture instead of signal number.
* Fixed locking in ->poll().
* Added sighand-detach notification.
* Added fd re-use in sys_signalfd() syscall.
* Now using anonymous inode source.
* Thanks to Oleg Nesterov for useful code review and suggestions.
* More comments and suggestions from Arnd Bergmann.
* Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
* Retrieve multiple signals with one read() call
* Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
* Attach to the sighand only during read() and poll().
*/
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>
#include <linux/syscalls.h>
/* Per-signalfd state: the set of signals this fd listens for. */
struct signalfd_ctx {
	sigset_t sigmask;
};
/* release() handler: free the per-fd context. */
static int signalfd_release(struct inode *inode, struct file *file)
{
	struct signalfd_ctx *ctx = file->private_data;

	kfree(ctx);
	return 0;
}
/*
 * poll() handler: report POLLIN when a signal from this fd's mask is
 * pending for the calling task, in either the task-private or the
 * shared pending set.  Waiters park on the sighand's signalfd_wqh.
 */
static unsigned int signalfd_poll(struct file *file, poll_table *wait)
{
	struct signalfd_ctx *ctx = file->private_data;
	unsigned int events = 0;

	poll_wait(file, &current->sighand->signalfd_wqh, wait);

	/* siglock guards both pending sets while we peek at them. */
	spin_lock_irq(&current->sighand->siglock);
	if (next_signal(&current->pending, &ctx->sigmask) ||
	    next_signal(&current->signal->shared_pending,
			&ctx->sigmask))
		events |= POLLIN;
	spin_unlock_irq(&current->sighand->siglock);

	return events;
}
/*
 * Copied from copy_siginfo_to_user() in kernel/signal.c
 *
 * Translate a kernel siginfo_t into the fixed-layout (128-byte)
 * struct signalfd_siginfo read by userspace.  Which union members are
 * meaningful depends on the __SI_* class encoded in si_code.
 * Returns sizeof(struct signalfd_siginfo) on success, -EFAULT if any
 * user-space write failed.
 */
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
			     siginfo_t const *kinfo)
{
	long err;

	BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);

	/*
	 * Unused members should be zero ...
	 */
	err = __clear_user(uinfo, sizeof(*uinfo));

	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 */
	err |= __put_user(kinfo->si_signo, &uinfo->ssi_signo);
	err |= __put_user(kinfo->si_errno, &uinfo->ssi_errno);
	err |= __put_user((short) kinfo->si_code, &uinfo->ssi_code);
	switch (kinfo->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
		err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
		err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
		err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
		break;
	case __SI_POLL:
		err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
		err |= __put_user(kinfo->si_fd, &uinfo->ssi_fd);
		break;
	case __SI_FAULT:
		err |= __put_user((long) kinfo->si_addr, &uinfo->ssi_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		err |= __put_user(kinfo->si_status, &uinfo->ssi_status);
		err |= __put_user(kinfo->si_utime, &uinfo->ssi_utime);
		err |= __put_user(kinfo->si_stime, &uinfo->ssi_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
		err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
		break;
	default:
		/*
		 * This case catches also the signals queued by sigqueue().
		 */
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
		err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
		break;
	}

	return err ? -EFAULT: sizeof(*uinfo);
}
/*
 * Dequeue one pending signal matching ctx->sigmask for the current task.
 *
 * Returns the signal number (>0) with *info filled in, -EAGAIN when
 * nothing is pending and @nonblock is set, or -ERESTARTSYS when the
 * sleep is interrupted by a signal outside our mask.
 */
static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
				int nonblock)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&current->sighand->siglock);
	ret = dequeue_signal(current, &ctx->sigmask, info);
	switch (ret) {
	case 0:
		/* nothing pending: blocking callers leave the switch and
		 * sleep below; non-blocking callers fall through with
		 * -EAGAIN and return immediately */
		if (!nonblock)
			break;
		ret = -EAGAIN;
	default:
		spin_unlock_irq(&current->sighand->siglock);
		return ret;
	}

	/* Slow path: wait on the sighand waitqueue until a matching
	 * signal arrives, re-checking after every wakeup. */
	add_wait_queue(&current->sighand->signalfd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = dequeue_signal(current, &ctx->sigmask, info);
		if (ret != 0)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* drop the siglock while asleep; retake to re-check */
		spin_unlock_irq(&current->sighand->siglock);
		schedule();
		spin_lock_irq(&current->sighand->siglock);
	}
	spin_unlock_irq(&current->sighand->siglock);

	remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
	__set_current_state(TASK_RUNNING);

	return ret;
}
/*
 * ->read(): dequeue as many pending signals as fit in the caller's
 * buffer, one struct signalfd_siginfo each.  Only the very first
 * dequeue may block (and only for blocking fds); once at least one
 * record has been copied out we never sleep again.  Returns a multiple
 * of sizeof(struct signalfd_siginfo), a negative error, or -EINVAL if
 * the buffer cannot hold even one record.
 */
static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct signalfd_ctx *ctx = file->private_data;
	struct signalfd_siginfo __user *uinfo;
	int nonblock = file->f_flags & O_NONBLOCK;
	ssize_t res, copied = 0;
	siginfo_t si;

	count /= sizeof(struct signalfd_siginfo);
	if (!count)
		return -EINVAL;

	uinfo = (struct signalfd_siginfo __user *) buf;
	for (; count; count--) {
		res = signalfd_dequeue(ctx, &si, nonblock);
		if (unlikely(res <= 0))
			break;
		res = signalfd_copyinfo(uinfo, &si);
		if (res < 0)
			break;
		uinfo++;
		copied += res;
		/* after the first record, never block */
		nonblock = 1;
	}

	return copied ? copied : res;
}
/*
 * No ->write()/->llseek(): the mask of an existing signalfd is changed
 * by calling signalfd4() again on the fd, not by writing to it.
 */
static const struct file_operations signalfd_fops = {
	.release = signalfd_release,
	.poll = signalfd_poll,
	.read = signalfd_read,
};
/*
 * signalfd4(2): create a new signalfd file (ufd == -1) or update the
 * signal mask of an existing one.  Returns the fd or a negative error.
 */
SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
		size_t, sizemask, int, flags)
{
	sigset_t sigmask;
	struct signalfd_ctx *ctx;

	/* Check the SFD_* constants for consistency.  */
	BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(SFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK))
		return -EINVAL;

	if (sizemask != sizeof(sigset_t) ||
	    copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
		return -EINVAL;
	/* SIGKILL and SIGSTOP can never be delivered via a signalfd */
	sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	/* stored complemented -- NOTE(review): presumably the form
	 * dequeue_signal() expects; confirm in kernel/signal.c */
	signotset(&sigmask);

	if (ufd == -1) {
		/* creation path: allocate context and install a new fd */
		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctx->sigmask = sigmask;

		/*
		 * When we call this, the initialization must be complete, since
		 * anon_inode_getfd() will install the fd.
		 */
		ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
				       O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK)));
		if (ufd < 0)
			kfree(ctx);
	} else {
		/* update path: retarget the mask of an existing signalfd */
		struct file *file = fget(ufd);
		if (!file)
			return -EBADF;
		ctx = file->private_data;
		/* reject fds that are not signalfds */
		if (file->f_op != &signalfd_fops) {
			fput(file);
			return -EINVAL;
		}
		spin_lock_irq(&current->sighand->siglock);
		ctx->sigmask = sigmask;
		spin_unlock_irq(&current->sighand->siglock);

		/* let sleepers in signalfd_dequeue() re-check the new mask */
		wake_up(&current->sighand->signalfd_wqh);
		fput(file);
	}

	return ufd;
}
/* Legacy signalfd(2): equivalent to signalfd4() with flags == 0. */
SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
		size_t, sizemask)
{
	return sys_signalfd4(ufd, user_mask, sizemask, 0);
}
| gpl-2.0 |
partyajak/android_kernel_lge_g2m | drivers/video/msm/mdss/mdss_qpic.c | 1398 | 14945 | /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/regulator/consumer.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <mach/sps.h>
#include <mach/clk.h>
#include <mach/hardware.h>
#include "mdss_fb.h"
#include "mdss_qpic.h"
static int mdss_qpic_probe(struct platform_device *pdev);
static int mdss_qpic_remove(struct platform_device *pdev);

/* Global driver state, allocated once in mdss_qpic_probe(). */
struct qpic_data_type *qpic_res;

/* for tuning */
static u32 use_bam = true;	/* DMA pixel data through the BAM pipe */
static u32 use_irq;		/* request/enable the LCDC interrupt */
static u32 use_vsync;		/* gate frames on the panel TE signal */

static const struct of_device_id mdss_qpic_dt_match[] = {
	{ .compatible = "qcom,mdss_qpic",},
	{}
};
MODULE_DEVICE_TABLE(of, mdss_qpic_dt_match);

static struct platform_driver mdss_qpic_driver = {
	.probe = mdss_qpic_probe,
	.remove = mdss_qpic_remove,
	.shutdown = NULL,
	.driver = {
		/*
		 * Simulate mdp hw
		 */
		.name = "mdp",
		.of_match_table = mdss_qpic_dt_match,
	},
};
/* Framebuffer "on" hook: power up the registered panel. */
int qpic_on(struct msm_fb_data_type *mfd)
{
	return mdss_qpic_panel_on(qpic_res->panel_data);
}
/* Framebuffer "off" hook: power down the registered panel. */
int qpic_off(struct msm_fb_data_type *mfd)
{
	return mdss_qpic_panel_off(qpic_res->panel_data);
}
static void mdss_qpic_pan_display(struct msm_fb_data_type *mfd)
{
struct fb_info *fbi;
u32 offset, fb_offset, size;
int bpp;
if (!mfd) {
pr_err("%s: mfd is NULL!", __func__);
return;
}
fbi = mfd->fbi;
bpp = fbi->var.bits_per_pixel / 8;
offset = fbi->var.xoffset * bpp +
fbi->var.yoffset * fbi->fix.line_length;
if (offset > fbi->fix.smem_len) {
pr_err("invalid fb offset=%u total length=%u\n",
offset, fbi->fix.smem_len);
return;
}
fb_offset = (u32)fbi->fix.smem_start + offset;
mdss_qpic_panel_on(qpic_res->panel_data);
size = fbi->var.xres * fbi->var.yres * bpp;
qpic_send_frame(0, 0, fbi->var.xres, fbi->var.yres,
(u32 *)fb_offset, size);
}
/*
 * Allocate (once) the DMA-coherent framebuffer plus a trailing command
 * buffer and publish the addresses through mfd->fbi.  Only fb index 0
 * gets backing memory; other indices are zeroed out and succeed.
 */
int mdss_qpic_alloc_fb_mem(struct msm_fb_data_type *mfd)
{
	size_t size;
	u32 yres = mfd->fbi->var.yres_virtual;

	size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);

	/* probe must have found the register/irq resources first */
	if (!qpic_res->res_init)
		return -EINVAL;

	if (mfd->index != 0) {
		/* secondary fb nodes get no backing memory */
		mfd->fbi->fix.smem_start = 0;
		mfd->fbi->screen_base = NULL;
		mfd->fbi->fix.smem_len = 0;
		mfd->iova = 0;
		return 0;
	}

	if (!qpic_res->fb_virt) {
		/* command buffer lives immediately after the framebuffer */
		qpic_res->fb_virt = (void *)dmam_alloc_coherent(
						&qpic_res->pdev->dev,
						size + QPIC_MAX_CMD_BUF_SIZE,
						&qpic_res->fb_phys,
						GFP_KERNEL);
		/* NOTE(review): this is a trace message, not an error;
		 * pr_debug would be the appropriate level */
		pr_err("%s size=%d vir_addr=%x phys_addr=%x",
			__func__, size, (int)qpic_res->fb_virt,
			(int)qpic_res->fb_phys);
		if (!qpic_res->fb_virt)
			return -ENOMEM;
		qpic_res->cmd_buf_virt = qpic_res->fb_virt + size;
		qpic_res->cmd_buf_phys = qpic_res->fb_phys + size;
	}
	mfd->fbi->fix.smem_start = qpic_res->fb_phys;
	mfd->fbi->screen_base = qpic_res->fb_virt;
	mfd->fbi->fix.smem_len = size;
	mfd->iova = 0;
	return 0;
}
/* Line stride in bytes: no per-line padding, simply xres * bytes/pixel. */
u32 mdss_qpic_fb_stride(u32 fb_index, u32 xres, int bpp)
{
	return xres * bpp;
}
/*
 * Wire the QPIC implementations into the mdss_fb mdp interface for this
 * framebuffer.  Hooks the hardware cannot support stay NULL.
 */
int mdss_qpic_overlay_init(struct msm_fb_data_type *mfd)
{
	struct msm_mdp_interface *qpic_interface = &mfd->mdp;

	qpic_interface->on_fnc = qpic_on;
	qpic_interface->off_fnc = qpic_off;
	qpic_interface->do_histogram = NULL;
	qpic_interface->cursor_update = NULL;
	qpic_interface->dma_fnc = mdss_qpic_pan_display;
	qpic_interface->ioctl_handler = NULL;
	qpic_interface->kickoff_fnc = NULL;
	return 0;
}
/*
 * Register a panel with the QPIC driver: create and add the "mdss_fb"
 * platform device that binds the framebuffer to this panel, then keep
 * the panel data for later power/frame operations.
 */
int qpic_register_panel(struct mdss_panel_data *pdata)
{
	struct platform_device *fb_pdev;
	int ret;

	fb_pdev = platform_device_alloc("mdss_fb", pdata->panel_info.pdest);
	if (!fb_pdev) {
		pr_err("unable to allocate mdss_fb device\n");
		return -ENOMEM;
	}

	fb_pdev->dev.platform_data = pdata;

	ret = platform_device_add(fb_pdev);
	if (ret) {
		/* drop the reference taken by platform_device_alloc() */
		platform_device_put(fb_pdev);
		pr_err("unable to probe mdss_fb device (%d)\n", ret);
		return ret;
	}

	qpic_res->panel_data = pdata;

	return ret;
}
/*
 * One-time setup of the SPS/BAM write pipe used to DMA pixel data into
 * the QPIC: register the BAM block (registers at qpic_base + 0x4000),
 * allocate and connect a system-memory -> BAM endpoint on destination
 * pipe 6, and register an EOT completion event.  Errors unwind through
 * sps_disconnect()/sps_free_endpoint().  Idempotent via sps_init.
 */
int qpic_init_sps(struct platform_device *pdev,
		  struct qpic_sps_endpt *end_point)
{
	int rc = 0;
	struct sps_pipe *pipe_handle;
	struct sps_connect *sps_config = &end_point->config;
	struct sps_register_event *sps_event = &end_point->bam_event;
	struct sps_bam_props bam = {0};
	u32 bam_handle = 0;

	if (qpic_res->sps_init)
		return 0;
	bam.phys_addr = qpic_res->qpic_phys + 0x4000;
	bam.virt_addr = qpic_res->qpic_base + 0x4000;
	/* NOTE(review): BAM irq derived as controller irq - 4; confirm
	 * against the SoC interrupt map */
	bam.irq = qpic_res->irq - 4;
	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;

	/* reuse an already-registered BAM if one exists at this address */
	rc = sps_phy2h(bam.phys_addr, &bam_handle);
	if (rc)
		rc = sps_register_bam_device(&bam, &bam_handle);
	if (rc) {
		pr_err("%s bam_handle is NULL", __func__);
		rc = -ENOMEM;
		goto out;
	}
	pipe_handle = sps_alloc_endpoint();
	if (!pipe_handle) {
		pr_err("sps_alloc_endpoint() failed\n");
		rc = -ENOMEM;
		goto out;
	}
	rc = sps_get_config(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_get_config() failed %d\n", rc);
		goto free_endpoint;
	}
	/* WRITE CASE: source - system memory; destination - BAM */
	sps_config->source = SPS_DEV_HANDLE_MEM;
	sps_config->destination = bam_handle;
	sps_config->mode = SPS_MODE_DEST;
	sps_config->dest_pipe_index = 6;

	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	sps_config->lock_group = 0;
	/*
	 * Descriptor FIFO is a cyclic FIFO. If 64 descriptors
	 * are allowed to be submitted before we get any ack for any of them,
	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
	 * sizeof(struct sps_iovec).
	 */
	sps_config->desc.size = (64) *
				sizeof(struct sps_iovec);
	sps_config->desc.base = dmam_alloc_coherent(&pdev->dev,
					sps_config->desc.size,
					&sps_config->desc.phys_base,
					GFP_KERNEL);
	if (!sps_config->desc.base) {
		pr_err("dmam_alloc_coherent() failed for size %x\n",
				sps_config->desc.size);
		rc = -ENOMEM;
		goto free_endpoint;
	}
	memset(sps_config->desc.base, 0x00, sps_config->desc.size);

	rc = sps_connect(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_connect() failed %d\n", rc);
		goto free_endpoint;
	}

	/* signal qpic_flush_buffer_bam() when the last descriptor (EOT)
	 * of a frame has been consumed */
	init_completion(&end_point->completion);
	sps_event->mode = SPS_TRIGGER_WAIT;
	sps_event->options = SPS_O_EOT;
	sps_event->xfer_done = &end_point->completion;
	sps_event->user = (void *)qpic_res;

	rc = sps_register_event(pipe_handle, sps_event);
	if (rc) {
		pr_err("sps_register_event() failed %d\n", rc);
		goto sps_disconnect;
	}

	end_point->handle = pipe_handle;
	qpic_res->sps_init = true;
	goto out;
sps_disconnect:
	sps_disconnect(pipe_handle);
free_endpoint:
	sps_free_endpoint(pipe_handle);
out:
	return rc;
}
/*
 * Soft-reset the QPIC LCDC block, then poll (up to
 * QPIC_MAX_VSYNC_WAIT_TIME ms) for the reset-done status bit
 * (bit 8 of LCDC_STTS).  Logs and gives up on timeout.
 */
void mdss_qpic_reset(void)
{
	u32 time_end;

	QPIC_OUTP(QPIC_REG_QPIC_LCDC_RESET, 1 << 0);
	/* wait 100 us after reset as suggested by hw */
	usleep(100);
	time_end = (u32)ktime_to_ms(ktime_get()) + QPIC_MAX_VSYNC_WAIT_TIME;
	while (((QPIC_INP(QPIC_REG_QPIC_LCDC_STTS) & (1 << 8)) == 0)) {
		if ((u32)ktime_to_ms(ktime_get()) > time_end) {
			pr_err("%s reset not finished", __func__);
			break;
		}
		/* yield 100 us for next polling by experiment*/
		usleep(100);
	}
}
/*
 * Enable or disable the QPIC LCDC interrupt.  Pending status bits are
 * cleared in either direction.
 *
 * Fix: the disable path now checks irq_ena, mirroring the enable path.
 * The original called disable_irq() unconditionally, which leaves the
 * line with a stale disable-depth when the irq was never enabled here
 * (mdss_qpic_init() calls qpic_interrupt_en(use_irq) with use_irq == 0
 * by default, and may be called repeatedly); enable_irq()/disable_irq()
 * calls must stay balanced.
 */
void qpic_interrupt_en(u32 en)
{
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
	if (en) {
		if (!qpic_res->irq_ena) {
			qpic_res->irq_ena = true;
			enable_irq(qpic_res->irq);
			/* bit 2 is the frame-EOF status polled in
			 * qpic_flush_buffer_sw(); bit 0 presumably
			 * frame done -- confirm against QPIC HW spec */
			QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN,
				(1 << 0) | (1 << 2));
		}
	} else if (qpic_res->irq_ena) {
		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
		disable_irq(qpic_res->irq);
		qpic_res->irq_ena = false;
	}
}
static irqreturn_t qpic_irq_handler(int irq, void *ptr)
{
u32 data;
data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
return 0;
}
/*
 * Push a buffer to the panel through the BAM DMA pipe.
 *
 * Commands are first staged in the dedicated coherent command buffer;
 * pixel data is submitted straight from its physical address (param is
 * then a physical address, not a pointer).  The transfer is split into
 * descriptors of at most 0x7FF0 bytes, with EOT on the last one only,
 * and we block up to 400 ms for the EOT completion.
 */
int qpic_flush_buffer_bam(u32 cmd, u32 len, u32 *param, u32 is_cmd)
{
	int ret = 0;
	u32 phys_addr, cfg2, block_len , flags;

	if (is_cmd) {
		/* stage command bytes in the coherent command buffer */
		memcpy((u8 *)qpic_res->cmd_buf_virt, param, len);
		invalidate_caches((unsigned long)qpic_res->cmd_buf_virt,
			len,
			(unsigned long)qpic_res->cmd_buf_phys);
		phys_addr = qpic_res->cmd_buf_phys;
	} else {
		/* pixel data: caller passes the physical address */
		phys_addr = (u32)param;
	}

	/* select the target LCD command in CFG2[7:0] */
	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
	cfg2 &= ~0xFF;
	cfg2 |= cmd;
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
	block_len = 0x7FF0;
	while (len > 0) {
		if (len <= 0x7FF0) {
			/* last chunk: request an end-of-transfer event */
			flags = SPS_IOVEC_FLAG_EOT;
			block_len = len;
		} else {
			flags = 0;
		}
		ret = sps_transfer_one(qpic_res->qpic_endpt.handle,
				phys_addr, block_len, NULL, flags);
		if (ret)
			pr_err("failed to submit command %x ret %d\n",
				cmd, ret);
		phys_addr += block_len;
		len -= block_len;
	}
	/* wait_for_completion_timeout() returns 0 on timeout */
	ret = wait_for_completion_timeout(
		&qpic_res->qpic_endpt.completion,
		msecs_to_jiffies(100 * 4));
	if (ret <= 0)
		pr_err("%s timeout %x", __func__, ret);
	else
		ret = 0;
	return ret;
}
/*
 * Push a buffer to the panel with the CPU (no DMA).
 *
 * Very short commands (<= 16 bytes) are packed into the direct
 * command-data registers.  Longer transfers go word-by-word through the
 * 16-entry LCDC FIFO in "transparent" mode, polling the fill level, and
 * finish by waiting for the EOF interrupt-status bit.  Returns 0, or
 * -EBUSY when a poll times out (QPIC_MAX_VSYNC_WAIT_TIME ms).
 */
int qpic_flush_buffer_sw(u32 cmd, u32 len, u32 *param, u32 is_cmd)
{
	u32 bytes_left, space, data, cfg2, time_end;
	int i, ret = 0;

	if ((len <= (sizeof(u32) * 4)) && (is_cmd)) {
		/* fast path: pack up to 4 dwords into the command regs */
		len >>= 2;/* len in dwords */
		data = 0;
		for (i = 0; i < len; i++)
			data |= param[i] << (8 * i);
		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, len);
		QPIC_OUTP(QPIC_REG_LCD_DEVICE_CMD0 + (4 * cmd), data);
		return 0;
	}
	/* FIFO writes are 2- or 4-byte; round an odd length up */
	if ((len & 0x1) != 0) {
		pr_err("%s: number of bytes needs be even", __func__);
		len = (len + 1) & (~0x1);
	}
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
	/* transparent mode + target command in CFG2 */
	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
	cfg2 |= (1 << 24); /* transparent mode */
	cfg2 &= ~0xFF;
	cfg2 |= cmd;
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_SOF, 0x0);
	bytes_left = len;

	while (bytes_left > 0) {
		time_end = (u32)ktime_to_ms(ktime_get()) +
			QPIC_MAX_VSYNC_WAIT_TIME;
		/* wait until the FIFO has fully drained */
		while (1) {
			data = QPIC_INP(QPIC_REG_QPIC_LCDC_STTS);
			data &= 0x3F;
			if (data == 0)
				break;
			/* yield 10 us for next polling by experiment*/
			usleep(10);
			if (ktime_to_ms(ktime_get()) > time_end) {
				pr_err("%s time out", __func__);
				ret = -EBUSY;
				goto exit_send_cmd_sw;
			}
		}
		space = (16 - data);

		while ((space > 0) && (bytes_left > 0)) {
			/* write to fifo */
			if (bytes_left >= 4) {
				QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
					param[0]);
				param++;
				bytes_left -= 4;
				space--;
			} else if (bytes_left == 2) {
				/* trailing halfword (len was rounded even) */
				QPIC_OUTPW(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
					*(u16 *)param);
				bytes_left -= 2;
			}
		}
	}
	/* finished */
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_EOF, 0x0);
	time_end = (u32)ktime_to_ms(ktime_get()) + QPIC_MAX_VSYNC_WAIT_TIME;
	/* wait for the EOF interrupt-status bit (bit 2) */
	while (1) {
		data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
		if (data & (1 << 2))
			break;
		/* yield 10 us for next polling by experiment*/
		usleep(10);
		if (ktime_to_ms(ktime_get()) > time_end) {
			pr_err("%s wait for eof time out", __func__);
			ret = -EBUSY;
			goto exit_send_cmd_sw;
		}
	}
exit_send_cmd_sw:
	/* always leave transparent mode, even on error */
	cfg2 &= ~(1 << 24);
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
	return ret;
}
/*
 * Dispatch a panel transfer: commands always take the CPU/FIFO path;
 * pixel data is DMA'd through BAM when that mode is enabled.
 */
int qpic_flush_buffer(u32 cmd, u32 len, u32 *param, u32 is_cmd)
{
	if (use_bam && !is_cmd)
		return qpic_flush_buffer_bam(cmd, len, param, is_cmd);
	return qpic_flush_buffer_sw(cmd, len, param, is_cmd);
}
/*
 * Controller initialisation: reset the LCDC block, program thresholds
 * and pixel format, optionally install/enable the interrupt, and switch
 * on BAM DMA and TE (vsync) modes as configured.
 *
 * Fix: the IRQ-request condition was inverted.  The original tested
 * "use_irq && qpic_res->irq_requested", but irq_requested starts false
 * and is only ever set inside this branch, so devm_request_irq() could
 * never run and use_irq mode was silently non-functional.  The flag
 * must be tested negated so the handler is installed on the first call.
 */
int mdss_qpic_init(void)
{
	int ret = 0;
	u32 data;

	mdss_qpic_reset();

	pr_info("%s version=%x", __func__, QPIC_INP(QPIC_REG_LCDC_VERSION));
	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
	/* clear vsync wait , bam mode = 0*/
	data &= ~(3 << 0);
	data &= ~(0x1f << 3);
	data |= (1 << 3); /* threshold */
	data |= (1 << 8); /* lcd_en */
	data &= ~(0x1f << 9);
	data |= (1 << 9); /* threshold */
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);

	if (use_irq && !qpic_res->irq_requested) {
		ret = devm_request_irq(&qpic_res->pdev->dev,
			qpic_res->irq, qpic_irq_handler,
			IRQF_DISABLED, "QPIC", qpic_res);
		if (ret) {
			pr_err("qpic request_irq() failed!\n");
			use_irq = false;
		}
		/* set even on failure: never retry a failed request */
		qpic_res->irq_requested = true;
	}
	qpic_interrupt_en(use_irq);

	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG0, 0x02108501);
	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
	data &= ~(0xFFF);
	data |= 0x200; /* XRGB */
	data |= 0x2C;
	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, data);

	if (use_bam) {
		qpic_init_sps(qpic_res->pdev , &qpic_res->qpic_endpt);
		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
		data |= (1 << 1);	/* BAM mode */
		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
	}

	/* TE enable */
	if (use_vsync) {
		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
		data |= (1 << 0);
		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
	}

	return ret;
}
/*
 * Platform probe: allocate the global state, map the "qpic_base"
 * register region, record the IRQ number, and register the QPIC mdp
 * interface with the mdss_fb core.  All resources are devm-managed,
 * so error paths and removal need no explicit cleanup.
 */
static int mdss_qpic_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc = 0;
	static struct msm_mdp_interface qpic_interface = {
		.init_fnc = mdss_qpic_overlay_init,
		.fb_mem_alloc_fnc = mdss_qpic_alloc_fb_mem,
		.fb_stride = mdss_qpic_fb_stride,
	};

	if (!pdev->dev.of_node) {
		pr_err("qpic driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	if (!qpic_res)
		qpic_res = devm_kzalloc(&pdev->dev,
			sizeof(*qpic_res), GFP_KERNEL);

	if (!qpic_res)
		return -ENOMEM;

	/* only one QPIC instance is supported */
	if (qpic_res->res_init) {
		pr_err("qpic already initialized\n");
		return -EINVAL;
	}

	pdev->id = 0;

	qpic_res->pdev = pdev;
	platform_set_drvdata(pdev, qpic_res);

	res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "qpic_base");
	if (!res) {
		pr_err("unable to get QPIC reg base address\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	qpic_res->qpic_reg_size = resource_size(res);
	qpic_res->qpic_base = devm_ioremap(&pdev->dev, res->start,
		qpic_res->qpic_reg_size);
	if (unlikely(!qpic_res->qpic_base)) {
		pr_err("unable to map MDSS QPIC base\n");
		rc = -ENOMEM;
		goto probe_done;
	}
	qpic_res->qpic_phys = res->start;
	pr_info("MDSS QPIC HW Base phy_Address=0x%x virt=0x%x\n",
		(int) res->start,
		(int) qpic_res->qpic_base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get QPIC irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	qpic_res->irq = res->start;
	/* gates mdss_qpic_alloc_fb_mem() and duplicate probes */
	qpic_res->res_init = true;

	rc = mdss_fb_register_mdp_instance(&qpic_interface);
	if (rc)
		pr_err("unable to register QPIC instance\n");

probe_done:
	return rc;
}
/* Nothing to undo here: probe uses only devm-managed resources. */
static int mdss_qpic_remove(struct platform_device *pdev)
{
	return 0;
}
/* Module entry point: register the platform driver. */
static int __init mdss_qpic_driver_init(void)
{
	int rc = platform_driver_register(&mdss_qpic_driver);

	if (rc)
		pr_err("mdss_qpic_register_driver() failed!\n");
	return rc;
}
module_init(mdss_qpic_driver_init);
| gpl-2.0 |
vfalico/popcorn | fs/ext3/resize.c | 3190 | 34858 | /*
* linux/fs/ext3/resize.c
*
* Support for resizing an ext3 filesystem while it is mounted.
*
* Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
*
* This could probably be made into a module, because it is not often in use.
*/
#define EXT3FS_DEBUG
#include <linux/ext3_jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
/* Half-open interval tests: is block b outside/inside [first, last)? */
#define outside(b, first, last) ((b) < (first) || (b) >= (last))
#define inside(b, first, last) ((b) >= (first) && (b) < (last))
/*
 * Sanity-check a group-add request from userspace before touching the
 * disk: the new group must append exactly at the current end of the
 * filesystem, its last block must be readable, and its bitmap and
 * inode-table blocks must lie inside the new group without overlapping
 * each other or the superblock/GDT metadata at the group's start.
 * Also computes input->free_blocks_count as a side effect.  Returns 0
 * if everything checks out, -EINVAL otherwise (after logging the
 * specific complaint via ext3_warning()).
 */
static int verify_group_input(struct super_block *sb,
			      struct ext3_new_group_data *input)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	ext3_fsblk_t start = le32_to_cpu(es->s_blocks_count);
	ext3_fsblk_t end = start + input->blocks_count;
	unsigned group = input->group;
	ext3_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	/* sb backup + GDT + reserved GDT blocks, if this group has them */
	unsigned overhead = ext3_bg_has_super(sb, group) ?
		(1 + ext3_bg_num_gdb(sb, group) +
		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
	ext3_fsblk_t metaend = start + overhead;
	struct buffer_head *bh = NULL;
	ext3_grpblk_t free_blocks_count;
	int err = -EINVAL;

	/* the extra 2 are the block and inode bitmaps */
	input->free_blocks_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext3_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	if (group != sbi->s_groups_count)
		ext3_warning(sb, __func__,
			     "Cannot add at group %u (only %lu groups)",
			     input->group, sbi->s_groups_count);
	else if ((start - le32_to_cpu(es->s_first_data_block)) %
		 EXT3_BLOCKS_PER_GROUP(sb))
		ext3_warning(sb, __func__, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext3_warning(sb, __func__, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext3_warning(sb, __func__, "Bad blocks count %u",
			     input->blocks_count);
	/* reading the last block proves the device is big enough */
	else if (!(bh = sb_bread(sb, end - 1)))
		ext3_warning(sb, __func__,
			     "Cannot read last block ("E3FSBLK")",
			     end - 1);
	else if (outside(input->block_bitmap, start, end))
		ext3_warning(sb, __func__,
			     "Block bitmap not in group (block %u)",
			     input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext3_warning(sb, __func__,
			     "Inode bitmap not in group (block %u)",
			     input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
	         outside(itend - 1, start, end))
		ext3_warning(sb, __func__,
			     "Inode table not in group (blocks %u-"E3FSBLK")",
			     input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext3_warning(sb, __func__,
			     "Block bitmap same as inode bitmap (%u)",
			     input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext3_warning(sb, __func__,
			     "Block bitmap (%u) in inode table (%u-"E3FSBLK")",
			     input->block_bitmap, input->inode_table, itend-1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext3_warning(sb, __func__,
			     "Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
			     input->inode_bitmap, input->inode_table, itend-1);
	else if (inside(input->block_bitmap, start, metaend))
		ext3_warning(sb, __func__,
			     "Block bitmap (%u) in GDT table"
			     " ("E3FSBLK"-"E3FSBLK")",
			     input->block_bitmap, start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext3_warning(sb, __func__,
			     "Inode bitmap (%u) in GDT table"
			     " ("E3FSBLK"-"E3FSBLK")",
			     input->inode_bitmap, start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext3_warning(sb, __func__,
			     "Inode table (%u-"E3FSBLK") overlaps"
			     "GDT table ("E3FSBLK"-"E3FSBLK")",
			     input->inode_table, itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}
/*
 * Fetch block @blk, join it to @handle for journaling and zero its
 * contents.  Returns the locked-in buffer_head, or an ERR_PTR on
 * failure (the buffer reference is dropped on the error path).
 */
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext3_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (!bh)
		return ERR_PTR(-EIO);

	err = ext3_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return ERR_PTR(err);
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	return bh;
}
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 *
 * Marks bits [start_bit, end_bit) in @bitmap as used: atomic ext3_set_bit
 * up to the next byte boundary, then memset(0xff) for whole bytes.
 * NOTE(review): bits in a trailing partial byte (end_bit not a multiple
 * of 8) would be left untouched by the memset; the callers in this file
 * pass byte-aligned end_bit values.
 */
static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext3_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
/*
 * If we have fewer than thresh credits, extend by EXT3_MAX_TRANS_DATA.
 * If that fails, restart the transaction & regain write access for the
 * buffer head which is used for block_bitmap modifications.
 *
 * Returns 0 on success or a negative journal error.
 */
static int extend_or_restart_transaction(handle_t *handle, int thresh,
					 struct buffer_head *bh)
{
	int err;

	if (handle->h_buffer_credits >= thresh)
		return 0;

	err = ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA);
	if (err < 0)
		return err;
	if (err) {
		/* positive return: the handle could not be extended in
		 * place -- commit and start a fresh transaction, then
		 * re-join bh to it so later dirtying stays valid */
		err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA);
		if (err)
			return err;
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			return err;
	}

	return 0;
}
/*
 * Set up the block and inode bitmaps, and the inode table for the new group.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem. We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * Phases (all under s_resize_lock, in one extendable transaction):
 *   1. zero the new block bitmap, marking the sb backup and GDT copies,
 *   2. copy the current GDT blocks into this group's backup area,
 *   3. zero the reserved (future) GDT blocks,
 *   4. zero the inode table blocks,
 *   5. mark the bitmap tails past the real block/inode counts as used.
 */
static int setup_new_group_blocks(struct super_block *sb,
				  struct ext3_new_group_data *input)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	ext3_fsblk_t start = ext3_group_first_block_no(sb, input->group);
	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
	unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group);
	struct buffer_head *bh;
	handle_t *handle;
	ext3_fsblk_t block;
	ext3_grpblk_t bit;
	int i;
	int err = 0, err2;

	/* This transaction may be extended/restarted along the way */
	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	mutex_lock(&sbi->s_resize_lock);
	/* a racing resize may have already added this group */
	if (input->group != sbi->s_groups_count) {
		err = -EBUSY;
		goto exit_journal;
	}

	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	if (ext3_bg_has_super(sb, input->group)) {
		ext3_debug("mark backup superblock %#04lx (+0)\n", start);
		ext3_set_bit(0, bh->b_data);
	}

	/* Copy all of the GDT blocks into the backup in this group */
	for (i = 0, bit = 1, block = start + 1;
	     i < gdblocks; i++, block++, bit++) {
		struct buffer_head *gdb;

		ext3_debug("update backup group %#04lx (+%d)\n", block, bit);

		err = extend_or_restart_transaction(handle, 1, bh);
		if (err)
			goto exit_bh;

		gdb = sb_getblk(sb, block);
		if (!gdb) {
			err = -EIO;
			goto exit_bh;
		}
		if ((err = ext3_journal_get_write_access(handle, gdb))) {
			brelse(gdb);
			goto exit_bh;
		}
		lock_buffer(gdb);
		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
		set_buffer_uptodate(gdb);
		unlock_buffer(gdb);
		err = ext3_journal_dirty_metadata(handle, gdb);
		if (err) {
			brelse(gdb);
			goto exit_bh;
		}
		ext3_set_bit(bit, bh->b_data);
		brelse(gdb);
	}

	/* Zero out all of the reserved backup group descriptor table blocks */
	for (i = 0, bit = gdblocks + 1, block = start + bit;
	     i < reserved_gdb; i++, block++, bit++) {
		struct buffer_head *gdb;

		ext3_debug("clear reserved block %#04lx (+%d)\n", block, bit);

		err = extend_or_restart_transaction(handle, 1, bh);
		if (err)
			goto exit_bh;

		if (IS_ERR(gdb = bclean(handle, sb, block))) {
			err = PTR_ERR(gdb);
			goto exit_bh;
		}
		err = ext3_journal_dirty_metadata(handle, gdb);
		if (err) {
			brelse(gdb);
			goto exit_bh;
		}
		ext3_set_bit(bit, bh->b_data);
		brelse(gdb);
	}
	ext3_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
		   input->block_bitmap - start);
	ext3_set_bit(input->block_bitmap - start, bh->b_data);
	ext3_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
		   input->inode_bitmap - start);
	ext3_set_bit(input->inode_bitmap - start, bh->b_data);

	/* Zero out all of the inode table blocks */
	for (i = 0, block = input->inode_table, bit = block - start;
	     i < sbi->s_itb_per_group; i++, bit++, block++) {
		struct buffer_head *it;

		ext3_debug("clear inode block %#04lx (+%d)\n", block, bit);

		err = extend_or_restart_transaction(handle, 1, bh);
		if (err)
			goto exit_bh;

		if (IS_ERR(it = bclean(handle, sb, block))) {
			err = PTR_ERR(it);
			goto exit_bh;
		}
		err = ext3_journal_dirty_metadata(handle, it);
		if (err) {
			brelse(it);
			goto exit_bh;
		}
		brelse(it);
		ext3_set_bit(bit, bh->b_data);
	}

	/* 2 credits: the block bitmap below and the inode bitmap after */
	err = extend_or_restart_transaction(handle, 2, bh);
	if (err)
		goto exit_bh;

	/* mark block-bitmap bits past blocks_count as in use */
	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
			bh->b_data);
	err = ext3_journal_dirty_metadata(handle, bh);
	if (err)
		goto exit_bh;
	brelse(bh);

	/* Mark unused entries in inode bitmap used */
	ext3_debug("clear inode bitmap %#04x (+%ld)\n",
		   input->inode_bitmap, input->inode_bitmap - start);
	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
		err = PTR_ERR(bh);
		goto exit_journal;
	}

	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
			bh->b_data);
	err = ext3_journal_dirty_metadata(handle, bh);
exit_bh:
	brelse(bh);

exit_journal:
	mutex_unlock(&sbi->s_resize_lock);
	if ((err2 = ext3_journal_stop(handle)) && !err)
		err = err2;

	return err;
}
/*
 * Yield, in increasing order, the numbers of the groups that hold
 * backup superblock/GDT copies.  With SPARSE_SUPER these are 1 and the
 * powers of 3, 5 and 7 (callers seed the three counters to 1, 5 and 7
 * before the first call); without it, simply every group.  Each call
 * returns the current minimum and advances that counter.
 */
static unsigned ext3_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *next = three;
	int base = 3;
	unsigned grp;

	if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		/* non-sparse layout: backups live in every group */
		grp = *next;
		*next += 1;
		return grp;
	}

	if (*five < *next) {
		next = five;
		base = 5;
	}
	if (*seven < *next) {
		next = seven;
		base = 7;
	}

	grp = *next;
	*next *= base;
	return grp;
}
/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order. Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       struct buffer_head *primary)
{
	const ext3_fsblk_t blk = primary->b_blocknr;
	const unsigned long end = EXT3_SB(sb)->s_groups_count;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	/* each backup group must record this block at the same offset
	 * within its own group */
	while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
			ext3_warning(sb, __func__,
				     "reserved GDT "E3FSBLK
				     " missing grp %d ("E3FSBLK")",
				     blk, grp,
				     grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
			return -EINVAL;
		}
		/* more backups than pointers fit in one block: corrupt */
		if (++gdbackups > EXT3_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}
/*
* Called when we need to bring a reserved group descriptor table block into
* use from the resize inode. The primary copy of the new GDT block currently
* is an indirect block (under the double indirect block in the resize inode).
* The new backup GDT blocks will be stored as leaf blocks in this indirect
* block, in group order. Even though we know all the block numbers we need,
* we check to ensure that the resize inode has actually reserved these blocks.
*
* Don't need to update the block bitmaps because the blocks are still in use.
*
* We get all of the error cases out of the way, so that we are sure to not
* fail once we start modifying the data on disk, because JBD has no rollback.
*/
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       struct ext3_new_group_data *input,
		       struct buffer_head **primary)
{
	struct super_block *sb = inode->i_sb;
	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
	/* Index of the new GDT block, and its on-disk location (GDT blocks
	 * immediately follow the superblock). */
	unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
	ext3_fsblk_t gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc;
	struct buffer_head *dind;
	int gdbackups;
	struct ext3_iloc iloc;
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT3-fs: ext3_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT3_SB(sb)->s_sbh->b_blocknr !=
	    le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
		ext3_warning(sb, __func__,
			"won't resize using backup superblock at %llu",
			(unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	*primary = sb_bread(sb, gdblock);
	if (!*primary)
		return -EIO;

	/* The reserved block must already carry backup pointers. */
	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
		err = gdbackups;
		goto exit_bh;
	}

	/* Read the double-indirect block of the resize inode. */
	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_bh;
	}

	/* The dindirect slot for this GDT block must point at gdblock,
	 * proving the resize inode actually reserved it. */
	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext3_warning(sb, __func__,
			     "new group %u GDT block "E3FSBLK" not reserved",
			     input->group, gdblock);
		err = -EINVAL;
		goto exit_dind;
	}

	/* Take journal write access on everything we will modify. */
	if ((err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh)))
		goto exit_dind;

	if ((err = ext3_journal_get_write_access(handle, *primary)))
		goto exit_sbh;

	if ((err = ext3_journal_get_write_access(handle, dind)))
		goto exit_primary;

	/* ext3_reserve_inode_write() gets a reference on the iloc */
	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
		goto exit_dindj;

	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
			GFP_NOFS);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext3_warning (sb, __func__,
			      "not enough memory for %lu groups", gdb_num + 1);
		goto exit_inode;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
	err = ext3_journal_dirty_metadata(handle, dind);
	if (err)
		goto exit_group_desc;
	brelse(dind);
	dind = NULL;
	/* The block and its backups leave the resize inode's accounting. */
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
	if (err)
		goto exit_group_desc;
	memset((*primary)->b_data, 0, sb->s_blocksize);
	err = ext3_journal_dirty_metadata(handle, *primary);
	if (err)
		goto exit_group_desc;

	/* Swap in the enlarged in-memory group descriptor array. */
	o_group_desc = EXT3_SB(sb)->s_group_desc;
	memcpy(n_group_desc, o_group_desc,
	       EXT3_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	n_group_desc[gdb_num] = *primary;
	EXT3_SB(sb)->s_group_desc = n_group_desc;
	EXT3_SB(sb)->s_gdb_count++;
	kfree(o_group_desc);

	/* One fewer GDT block remains reserved for future growth. */
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
	if (err)
		goto exit_inode;

	return 0;

exit_group_desc:
	kfree(n_group_desc);
exit_inode:
	//ext3_journal_release_buffer(handle, iloc.bh);
	brelse(iloc.bh);
exit_dindj:
	//ext3_journal_release_buffer(handle, dind);
exit_primary:
	//ext3_journal_release_buffer(handle, *primary);
exit_sbh:
	//ext3_journal_release_buffer(handle, *primary);
exit_dind:
	brelse(dind);
exit_bh:
	brelse(*primary);

	ext3_debug("leaving with error %d\n", err);
	return err;
}
/*
* Called when we are adding a new group which has a backup copy of each of
* the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
* We need to add these reserved backup GDT blocks to the resize inode, so
* that they are kept for future resizing and not allocated to files.
*
* Each reserved backup GDT block will go into a different indirect block.
* The indirect blocks are actually the primary reserved GDT blocks,
* so we know in advance what their block numbers are. We only get the
* double-indirect block to verify it is pointing to the primary reserved
* GDT blocks so we don't overwrite a data block by accident. The reserved
* backup GDT blocks are stored in their reserved primary GDT block.
*/
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      struct ext3_new_group_data *input)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT3_SB(sb)->s_es->s_reserved_gdt_blocks);
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext3_iloc iloc;
	ext3_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	/* Read the resize inode's double-indirect block. */
	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_free;
	}

	/* First reserved primary GDT block follows the in-use GDT blocks. */
	blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT3_SB(sb)->s_gdb_count %
					 EXT3_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext3_warning(sb, __func__,
				     "reserved block "E3FSBLK
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = sb_bread(sb, blk);
		if (!primary[res]) {
			err = -EIO;
			goto exit_bh;
		}
		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		/* Wrap around within the dindirect block. */
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		if ((err = ext3_journal_get_write_access(handle, primary[i]))) {
			/*
			int j;
			for (j = 0; j < i; j++)
				ext3_journal_release_buffer(handle, primary[j]);
			 */
			goto exit_bh;
		}
	}

	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext3_journal_dirty_metadata(handle, primary[i]);
		if (!err)
			err = err2;
	}
	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
	ext3_mark_iloc_dirty(handle, inode, &iloc);

	/* Success falls through here too: res == reserved_gdb, so every
	 * primary buffer is released below. */
exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}
/*
* Update the backup copies of the ext3 metadata. These don't need to be part
* of the main resize transaction, because e2fsck will re-write them if there
* is a problem (basically only OOM will cause a problem). However, we
* _should_ update the backups if possible, in case the primary gets trashed
* for some reason and we need to run e2fsck from a backup superblock. The
* important part is that the new block and inode counts are in the backup
* superblocks, and the location of the new group metadata in the GDT backups.
*
* We do not need take the s_resize_lock for this, because these
* blocks are not otherwise touched by the filesystem code when it is
* mounted. We don't need to worry about last changing from
* sbi->s_groups_count, because the worst that can happen is that we
* do not copy the full number of backups at this time. The resize
* which changed s_groups_count will backup again.
*/
static void update_backups(struct super_block *sb,
			   int blk_off, char *data, int size)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	const unsigned long last = sbi->s_groups_count;
	const int bpg = EXT3_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;	/* iterator state for ext3_list_backups() */
	unsigned five = 5;
	unsigned seven = 7;
	unsigned group;
	int rest = sb->s_blocksize - size;	/* tail to zero-fill */
	handle_t *handle;
	int err = 0, err2;

	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	/* Copy @data into the backup block of every sparse-super group. */
	while ((group = ext3_list_backups(sb, &three, &five, &seven)) < last) {
		struct buffer_head *bh;

		/* Out of journal space, and can't get more - abort - so sad */
		if (handle->h_buffer_credits == 0 &&
		    ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA) &&
		    (err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA)))
			break;

		bh = sb_getblk(sb, group * bpg + blk_off);
		if (!bh) {
			err = -EIO;
			break;
		}
		ext3_debug("update metadata backup %#04lx\n",
			   (unsigned long)bh->b_blocknr);
		if ((err = ext3_journal_get_write_access(handle, bh))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext3_journal_dirty_metadata(handle, bh);
		brelse(bh);
		if (err)
			break;
	}
	if ((err2 = ext3_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext3_warning(sb, __func__,
			     "can't update backup for group %d (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT3_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}
/* Add group descriptor data to an existing or new group descriptor block.
* Ensure we handle all possible error conditions _before_ we start modifying
* the filesystem, because we cannot abort the transaction and not have it
* write the data to disk.
*
* If we are on a GDT block boundary, we need to get the reserved GDT block.
* Otherwise, we may need to add backup GDT blocks for a sparse group.
*
* We only need to hold the superblock lock while we are actually adding
* in the new group's counts to the superblock. Prior to that we have
* not really "added" the group at all. We re-check that we are still
* adding in the last group in case things have changed since verifying.
*/
int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	/* Only groups carrying a superblock backup need reserved GDBs. */
	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct buffer_head *primary = NULL;
	struct ext3_group_desc *gdp;
	struct inode *inode = NULL;
	handle_t *handle;
	int gdb_off, gdb_num;
	int err, err2;

	/* Which GDT block holds the new group's descriptor, and where. */
	gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
	gdb_off = input->group % EXT3_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ext3_warning(sb, __func__,
			     "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	/* Reject additions that would wrap the 32-bit on-disk counters. */
	if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
	    le32_to_cpu(es->s_blocks_count)) {
		ext3_warning(sb, __func__, "blocks_count overflow\n");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext3_warning(sb, __func__, "inodes_count overflow\n");
		return -EINVAL;
	}

	/* A new GDT block or backup GDBs require the resize inode. */
	if (reserved_gdb || gdb_off == 0) {
		if (!EXT3_HAS_COMPAT_FEATURE(sb,
					     EXT3_FEATURE_COMPAT_RESIZE_INODE)
		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext3_warning(sb, __func__,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext3_iget(sb, EXT3_RESIZE_INO);
		if (IS_ERR(inode)) {
			ext3_warning(sb, __func__,
				     "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	if ((err = verify_group_input(sb, input)))
		goto exit_put;

	if ((err = setup_new_group_blocks(sb, input)))
		goto exit_put;

	/*
	 * We will always be modifying at least the superblock and a GDT
	 * block.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	handle = ext3_journal_start_sb(sb,
				       ext3_bg_has_super(sb, input->group) ?
				       3 + reserved_gdb : 4);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit_put;
	}

	mutex_lock(&sbi->s_resize_lock);
	/* Re-check under the lock: we must still be appending the next group. */
	if (input->group != sbi->s_groups_count) {
		ext3_warning(sb, __func__,
			     "multiple resizers run on filesystem!");
		err = -EBUSY;
		goto exit_journal;
	}

	if ((err = ext3_journal_get_write_access(handle, sbi->s_sbh)))
		goto exit_journal;

	/*
	 * We will only either add reserved group blocks to a backup group
	 * or remove reserved blocks for the first group in a new group block.
	 * Doing both would mean more complex code, and sane people don't
	 * use non-sparse filesystems anymore.  This is already checked above.
	 */
	if (gdb_off) {
		primary = sbi->s_group_desc[gdb_num];
		if ((err = ext3_journal_get_write_access(handle, primary)))
			goto exit_journal;

		if (reserved_gdb && ext3_bg_num_gdb(sb, input->group) &&
		    (err = reserve_backup_gdb(handle, inode, input)))
			goto exit_journal;
	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
		goto exit_journal;

	/*
	 * OK, now we've set up the new group.  Time to make it active.
	 *
	 * We do not lock all allocations via s_resize_lock
	 * so we have to be safe wrt. concurrent accesses the group
	 * data.  So we need to be careful to set all of the relevant
	 * group descriptor data etc. *before* we enable the group.
	 *
	 * The key field here is sbi->s_groups_count: as long as
	 * that retains its old value, nobody is going to access the new
	 * group.
	 *
	 * So first we update all the descriptor metadata for the new
	 * group; then we update the total disk blocks count; then we
	 * update the groups count to enable the group; then finally we
	 * update the free space counts so that the system can start
	 * using the new disk blocks.
	 */

	/* Update group descriptor block for new group */
	gdp = (struct ext3_group_desc *)primary->b_data + gdb_off;

	gdp->bg_block_bitmap = cpu_to_le32(input->block_bitmap);
	gdp->bg_inode_bitmap = cpu_to_le32(input->inode_bitmap);
	gdp->bg_inode_table = cpu_to_le32(input->inode_table);
	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
	gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));

	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	le32_add_cpu(&es->s_blocks_count, input->blocks_count);
	le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb));

	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers of s_groups_count *must* hold s_resize_lock
	 * AND
	 * * Writers must perform a smp_wmb() after updating all dependent
	 *   data and before modifying the groups count
	 *
	 * * Readers must hold s_resize_lock over the access
	 * OR
	 * * Readers must perform an smp_rmb() after reading the groups count
	 *   and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count++;

	err = ext3_journal_dirty_metadata(handle, primary);
	if (err)
		goto exit_journal;

	/* Update the reserved block counts only once the new group is
	 * active. */
	le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeblocks_counter,
			   input->free_blocks_count);
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT3_INODES_PER_GROUP(sb));

	err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);

exit_journal:
	mutex_unlock(&sbi->s_resize_lock);
	if ((err2 = ext3_journal_stop(handle)) && !err)
		err = err2;
	/* Backups are updated outside the resize transaction; see the
	 * comment above update_backups(). */
	if (!err) {
		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext3_super_block));
		update_backups(sb, primary->b_blocknr, primary->b_data,
			       primary->b_size);
	}
exit_put:
	iput(inode);
	return err;
} /* ext3_group_add */
/* Extend the filesystem to the new number of blocks specified. This entry
* point is only used to extend the current filesystem to the end of the last
* existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
* for emergencies (because it has no dependencies on reserved blocks).
*
* If we _really_ wanted, we could use default values to call ext3_group_add()
* allow the "remount" trick to work for arbitrary resizing, assuming enough
* GDT blocks are reserved to grow to the desired size.
*/
int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
		      ext3_fsblk_t n_blocks_count)
{
	ext3_fsblk_t o_blocks_count;
	ext3_grpblk_t last;
	ext3_grpblk_t add;
	struct buffer_head *bh;
	handle_t *handle;
	int err;
	unsigned long freed_blocks;

	/* We don't need to worry about locking wrt other resizers just
	 * yet: we're going to revalidate es->s_blocks_count after
	 * taking the s_resize_lock below. */
	o_blocks_count = le32_to_cpu(es->s_blocks_count);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
		       " up to "E3FSBLK" blocks\n",
		       o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	/* Refuse sizes the block layer's sector_t cannot address. */
	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		printk(KERN_ERR "EXT3-fs: filesystem on %s:"
			" too large to resize to "E3FSBLK" blocks safely\n",
			sb->s_id, n_blocks_count);
		if (sizeof(sector_t) < 8)
			ext3_warning(sb, __func__,
				     "CONFIG_LBDAF not enabled\n");
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext3_warning(sb, __func__,
			     "can't shrink FS - resize aborted");
		return -EBUSY;
	}

	/* Handle the remaining blocks in the last group only. */
	last = (o_blocks_count - le32_to_cpu(es->s_first_data_block)) %
		EXT3_BLOCKS_PER_GROUP(sb);

	/* last == 0 means the final group is already full. */
	if (last == 0) {
		ext3_warning(sb, __func__,
			     "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT3_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext3_warning(sb, __func__, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext3_warning(sb, __func__,
			     "will only finish group ("E3FSBLK
			     " blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add - 1);
	if (!bh) {
		ext3_warning(sb, __func__,
			     "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext3_free_blocks().
	 */
	handle = ext3_journal_start_sb(sb, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext3_warning(sb, __func__, "error %d on journal start", err);
		goto exit_put;
	}

	mutex_lock(&EXT3_SB(sb)->s_resize_lock);
	/* Re-check the size under the lock to detect a concurrent resizer. */
	if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
		ext3_warning(sb, __func__,
			     "multiple resizers run on filesystem!");
		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
		ext3_journal_stop(handle);
		err = -EBUSY;
		goto exit_put;
	}

	if ((err = ext3_journal_get_write_access(handle,
						 EXT3_SB(sb)->s_sbh))) {
		ext3_warning(sb, __func__,
			     "error %d on journal write access", err);
		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
		ext3_journal_stop(handle);
		goto exit_put;
	}
	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
	mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
	if (err) {
		ext3_warning(sb, __func__,
			     "error %d on journal dirty metadata", err);
		ext3_journal_stop(handle);
		goto exit_put;
	}
	/* Make the new blocks allocatable by "freeing" them into the
	 * (now larger) last group. */
	ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
		   o_blocks_count, o_blocks_count + add);
	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
	ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
		   o_blocks_count, o_blocks_count + add);
	if ((err = ext3_journal_stop(handle)))
		goto exit_put;
	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n",
		       le32_to_cpu(es->s_blocks_count));
	update_backups(sb, EXT3_SB(sb)->s_sbh->b_blocknr, (char *)es,
		       sizeof(struct ext3_super_block));
exit_put:
	return err;
} /* ext3_group_extend */
| gpl-2.0 |
CyanogenMod/android_kernel_htc_msm8960 | arch/c6x/platforms/megamod-pic.c | 4470 | 8074 | /*
* Support for C64x+ Megamodule Interrupt Controller
*
* Copyright (C) 2010, 2011 Texas Instruments Incorporated
* Contributed by: Mark Salter <msalter@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/soc.h>
#include <asm/megamod-pic.h>
#define NR_COMBINERS 4
#define NR_MUX_OUTPUTS 12
#define IRQ_UNMAPPED 0xffff
/*
 * Megamodule Interrupt Controller register layout.
 *
 * Field names mirror the hardware register names; the reservedN arrays
 * pad gaps in the register map so each member lands at its architected
 * offset once the block is ioremapped.  Do not reorder or resize fields.
 */
struct megamod_regs {
	u32	evtflag[8];
	u32	evtset[8];
	u32	evtclr[8];	/* write 1<<n to acknowledge event n */
	u32	reserved0[8];
	u32	evtmask[8];	/* set bit = event masked (mask_megamod) */
	u32	mevtflag[8];	/* masked event flags, read by the cascade */
	u32	expmask[8];
	u32	mexpflag[8];	/* masked exception flags (get_exception) */
	u32	intmux_unused;
	u32	intmux[7];	/* event -> core priority mux (set_megamod_mux) */
	u32	reserved1[8];
	u32	aegmux[2];
	u32	reserved2[14];
	u32	intxstat;
	u32	intxclr;
	u32	intdmask;
	u32	reserved3[13];
	u32	evtasrt;	/* software event assert (assert_event) */
};
/* Driver state for one megamodule PIC instance. */
struct megamod_pic {
	struct irq_domain *irqhost;	/* linear domain, NR_COMBINERS * 32 hwirqs */
	struct megamod_regs __iomem *regs;	/* mapped controller registers */
	raw_spinlock_t lock;		/* serializes read-modify-write of evtmask */
	/* hw mux mapping: for each core priority output, the muxed hwirq
	 * (IRQ_UNMAPPED when the output is not driven by a single event) */
	unsigned int output_to_irq[NR_MUX_OUTPUTS];
};

/* Singleton instance, set up by megamod_pic_init(). */
static struct megamod_pic *mm_pic;

/* Per-combiner cookie handed to the chained cascade handler. */
struct megamod_cascade_data {
	struct megamod_pic *pic;
	int index;			/* combiner number, 0..NR_COMBINERS-1 */
};

static struct megamod_cascade_data cascade_data[NR_COMBINERS];
static void mask_megamod(struct irq_data *data)
{
struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
irq_hw_number_t src = irqd_to_hwirq(data);
u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
raw_spin_lock(&pic->lock);
soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
raw_spin_unlock(&pic->lock);
}
static void unmask_megamod(struct irq_data *data)
{
struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
irq_hw_number_t src = irqd_to_hwirq(data);
u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
raw_spin_lock(&pic->lock);
soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
raw_spin_unlock(&pic->lock);
}
/* irq_chip for combined events: mask/unmask via the EVTMASK registers. */
static struct irq_chip megamod_chip = {
	.name		= "megamod",
	.irq_mask	= mask_megamod,
	.irq_unmask	= unmask_megamod,
};
/*
 * Chained handler for a combiner output: drain all pending (unmasked)
 * events of the combiner, acknowledging each in EVTCLR before handing
 * it to the mapped virq.
 */
static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade;
	struct megamod_pic *pic;
	u32 events;
	int n, idx;

	cascade = irq_desc_get_handler_data(desc);

	pic = cascade->pic;
	idx = cascade->index;

	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		n = __ffs(events);

		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);

		/* ack the event before dispatching it */
		soc_writel(1 << n, &pic->regs->evtclr[idx]);

		generic_handle_irq(irq);
	}
}
/*
 * Domain map callback: wire up a combined megamodule event as a
 * level-triggered virq.  Sources muxed straight to a core priority
 * output are handled by the core controller, not here.
 */
static int megamod_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct megamod_pic *pic = h->host_data;
	int out;

	/* We shouldn't see a hwirq which is muxed to core controller */
	for (out = 0; out < NR_MUX_OUTPUTS; out++) {
		if (pic->output_to_irq[out] == hw)
			return -1;
	}

	irq_set_chip_data(virq, pic);
	irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);
	return 0;
}
/* Domain callbacks: one-cell DT interrupt specifiers, mapped via megamod_map. */
static const struct irq_domain_ops megamod_domain_ops = {
	.map	= megamod_map,
	.xlate	= irq_domain_xlate_onecell,
};
/*
 * Route megamodule event @src to core priority output @output.
 * Out-of-range sources simply record the output as unmapped.
 */
static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
{
	int reg, shift;
	u32 mux;

	if (src < 0 || src >= (NR_COMBINERS * 32)) {
		pic->output_to_irq[output] = IRQ_UNMAPPED;
		return;
	}

	/* four mappings per mux register, one byte each */
	reg = output / 4;
	shift = (output & 3) * 8;

	mux = soc_readl(&pic->regs->intmux[reg]);
	mux = (mux & ~(0xff << shift)) | (src << shift);
	soc_writel(mux, &pic->regs->intmux[reg]);
}
/*
* Parse the MUX mapping, if one exists.
*
* The MUX map is an array of up to 12 cells; one for each usable core priority
* interrupt. The value of a given cell is the megamodule interrupt source
 * which is to be MUXed to the output corresponding to the cell position
 * within the array. The first cell in the array corresponds to priority
* 4 and the last (12th) cell corresponds to priority 15. The allowed
* values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
* sources (0 - 3) are not allowed to be mapped through this property. They
* are handled through the "interrupts" property. This allows us to use a
* value of zero as a "do not map" placeholder.
*/
/*
 * Fill @mapping from the optional "ti,c64x+megamod-pic-mux" property.
 * Cells holding 0 (the "do not map" placeholder) or the combiner
 * sources 1-3 are skipped; other values are recorded as-is.
 */
static void __init parse_priority_map(struct megamod_pic *pic,
				      int *mapping, int size)
{
	struct device_node *np = pic->irqhost->of_node;
	const __be32 *cell;
	int len, i;

	cell = of_get_property(np, "ti,c64x+megamod-pic-mux", &len);
	if (!cell)
		return;

	len /= 4;		/* bytes -> cells */
	if (len > size)
		len = size;

	for (i = 0; i < len; i++, cell++) {
		u32 src = be32_to_cpup(cell);

		if (src && src >= 4)
			mapping[i] = src;
	}
}
/*
 * Allocate and configure the PIC from device tree node @np: create the
 * irq domain, map the registers, hook up the combiner cascades listed
 * in "interrupts", and program the priority MUX.  Returns NULL on error.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	/* NOTE(review): redundant — irq_domain_add_linear() above already
	 * stored pic as host_data; harmless. */
	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		/* NOTE(review): the irq domain allocated above is leaked on
		 * this path — confirm whether irq_domain_remove() is wanted. */
		goto error_free;
	}

	/* Initialize MUX map */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources.  The "interrupts" property only
	 * deals with the cascaded combined interrupts.  The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {

		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		/*
		 * We count on the core priority interrupts (4 - 15) being
		 * direct mapped.  Check that device tree provided something
		 * in that range.
		 */
		if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d virq %d out of range!\n",
			       np->full_name, i, irq);
			continue;
		}

		/* record the mapping */
		mapping[irq - 4] = i;

		pr_debug("%s: combiner-%d cascading to virq %d\n",
			 np->full_name, i, irq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}
/*
 * Return next active event after ACK'ing it.
 * Return -1 if no events active.
 */
static int get_exception(void)
{
	int idx;

	for (idx = 0; idx < NR_COMBINERS; idx++) {
		u32 events = soc_readl(&mm_pic->regs->mexpflag[idx]);
		int bit;

		if (!events)
			continue;

		bit = __ffs(events);
		soc_writel(1 << bit, &mm_pic->regs->evtclr[idx]);
		return idx * 32 + bit;
	}
	return -1;
}
/* Raise megamodule event @val in software via the EVTASRT register. */
static void assert_event(unsigned int val)
{
	soc_writel(val, &mm_pic->regs->evtasrt);
}
/*
 * Locate the megamodule PIC node in the device tree, initialize the
 * controller, and register the SoC exception hooks.
 */
void __init megamod_pic_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
	if (!np)
		return;

	mm_pic = init_megamod_pic(np);
	of_node_put(np);

	soc_ops.get_exception = get_exception;
	soc_ops.assert_event = assert_event;
}
| gpl-2.0 |
CyanideL/android_kernel_samsung_klte | drivers/rtc/rtc-fm3130.c | 4982 | 16888 | /*
* rtc-fm3130.c - RTC driver for Ramtron FM3130 I2C chip.
*
* Copyright (C) 2008 Sergey Lapin
* Based on ds1307 driver by James Chapman and David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/slab.h>
#define FM3130_RTC_CONTROL (0x0)
#define FM3130_CAL_CONTROL (0x1)
#define FM3130_RTC_SECONDS (0x2)
#define FM3130_RTC_MINUTES (0x3)
#define FM3130_RTC_HOURS (0x4)
#define FM3130_RTC_DAY (0x5)
#define FM3130_RTC_DATE (0x6)
#define FM3130_RTC_MONTHS (0x7)
#define FM3130_RTC_YEARS (0x8)
#define FM3130_ALARM_SECONDS (0x9)
#define FM3130_ALARM_MINUTES (0xa)
#define FM3130_ALARM_HOURS (0xb)
#define FM3130_ALARM_DATE (0xc)
#define FM3130_ALARM_MONTHS (0xd)
#define FM3130_ALARM_WP_CONTROL (0xe)
#define FM3130_CAL_CONTROL_BIT_nOSCEN (1 << 7) /* Osciallator enabled */
#define FM3130_RTC_CONTROL_BIT_LB (1 << 7) /* Low battery */
#define FM3130_RTC_CONTROL_BIT_AF (1 << 6) /* Alarm flag */
#define FM3130_RTC_CONTROL_BIT_CF (1 << 5) /* Century overflow */
#define FM3130_RTC_CONTROL_BIT_POR (1 << 4) /* Power on reset */
#define FM3130_RTC_CONTROL_BIT_AEN (1 << 3) /* Alarm enable */
#define FM3130_RTC_CONTROL_BIT_CAL (1 << 2) /* Calibration mode */
#define FM3130_RTC_CONTROL_BIT_WRITE (1 << 1) /* W=1 -> write mode W=0 normal */
#define FM3130_RTC_CONTROL_BIT_READ (1 << 0) /* R=1 -> read mode R=0 normal */
#define FM3130_CLOCK_REGS 7
#define FM3130_ALARM_REGS 5
/* Per-device driver state. */
struct fm3130 {
	u8 reg_addr_time;	/* presumably start register for time reads — set outside this chunk, confirm in probe */
	u8 reg_addr_alarm;	/* presumably start register for alarm reads — set outside this chunk, confirm in probe */
	u8 regs[15];		/* shadow of chip registers 0x0..0xe */
	struct i2c_msg msg[4];	/* [0..1]: time read pair, [2..3]: alarm read pair */
	struct i2c_client *client;
	struct rtc_device *rtc;
	int alarm_valid;	/* nonzero when alarm registers hold usable data */
	int data_valid;		/* nonzero when clock registers hold usable data */
};
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id fm3130_id[] = {
	{ "fm3130", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, fm3130_id);
#define FM3130_MODE_NORMAL 0
#define FM3130_MODE_WRITE 1
#define FM3130_MODE_READ 2
/*
 * Switch the chip between NORMAL/WRITE/READ access modes by updating
 * the R and W bits in the RTC control register.  The register is read
 * back first so the other control bits are preserved; on an invalid
 * @mode the freshly-read value is written back unchanged.
 */
static void fm3130_rtc_mode(struct device *dev, int mode)
{
	struct fm3130 *fm3130 = dev_get_drvdata(dev);

	fm3130->regs[FM3130_RTC_CONTROL] =
		i2c_smbus_read_byte_data(fm3130->client, FM3130_RTC_CONTROL);
	switch (mode) {
	case FM3130_MODE_NORMAL:
		fm3130->regs[FM3130_RTC_CONTROL] &=
			~(FM3130_RTC_CONTROL_BIT_WRITE |
			FM3130_RTC_CONTROL_BIT_READ);
		break;
	case FM3130_MODE_WRITE:
		fm3130->regs[FM3130_RTC_CONTROL] |= FM3130_RTC_CONTROL_BIT_WRITE;
		break;
	case FM3130_MODE_READ:
		fm3130->regs[FM3130_RTC_CONTROL] |= FM3130_RTC_CONTROL_BIT_READ;
		break;
	default:
		dev_dbg(dev, "invalid mode %d\n", mode);
		break;
	}

	i2c_smbus_write_byte_data(fm3130->client,
		FM3130_RTC_CONTROL, fm3130->regs[FM3130_RTC_CONTROL]);
}
static int fm3130_get_time(struct device *dev, struct rtc_time *t)
{
struct fm3130 *fm3130 = dev_get_drvdata(dev);
int tmp;
if (!fm3130->data_valid) {
/* We have invalid data in RTC, probably due
to battery faults or other problems. Return EIO
for now, it will allow us to set data later instead
of error during probing which disables device */
return -EIO;
}
fm3130_rtc_mode(dev, FM3130_MODE_READ);
/* read the RTC date and time registers all at once */
tmp = i2c_transfer(to_i2c_adapter(fm3130->client->dev.parent),
fm3130->msg, 2);
if (tmp != 2) {
dev_err(dev, "%s error %d\n", "read", tmp);
return -EIO;
}
fm3130_rtc_mode(dev, FM3130_MODE_NORMAL);
dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x %02x"
"%02x %02x %02x %02x %02x %02x %02x\n",
"read",
fm3130->regs[0], fm3130->regs[1],
fm3130->regs[2], fm3130->regs[3],
fm3130->regs[4], fm3130->regs[5],
fm3130->regs[6], fm3130->regs[7],
fm3130->regs[8], fm3130->regs[9],
fm3130->regs[0xa], fm3130->regs[0xb],
fm3130->regs[0xc], fm3130->regs[0xd],
fm3130->regs[0xe]);
t->tm_sec = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
t->tm_min = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
tmp = fm3130->regs[FM3130_RTC_HOURS] & 0x3f;
t->tm_hour = bcd2bin(tmp);
t->tm_wday = bcd2bin(fm3130->regs[FM3130_RTC_DAY] & 0x07) - 1;
t->tm_mday = bcd2bin(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
tmp = fm3130->regs[FM3130_RTC_MONTHS] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
/* assume 20YY not 19YY, and ignore CF bit */
t->tm_year = bcd2bin(fm3130->regs[FM3130_RTC_YEARS]) + 100;
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read", t->tm_sec, t->tm_min,
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
/* initial clock setting can be undefined */
return rtc_valid_tm(t);
}
/*
 * Write a new time to the FM3130.
 *
 * Encodes *t into BCD in the regs[] shadow, enters WRITE mode, pushes
 * the clock registers one SMBus byte at a time (the chip path does not
 * support multibyte writes here), then returns to NORMAL mode which
 * commits the new time.  Marks the chip contents valid afterwards.
 *
 * Always returns 0; individual SMBus write errors are not checked.
 */
static int fm3130_set_time(struct device *dev, struct rtc_time *t)
{
	struct fm3130 *fm3130 = dev_get_drvdata(dev);
	int tmp, i;
	u8 *buf = fm3130->regs;

	dev_dbg(dev, "%s secs=%d, mins=%d, "
		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
		"write", t->tm_sec, t->tm_min,
		t->tm_hour, t->tm_mday,
		t->tm_mon, t->tm_year, t->tm_wday);

	/* first register addr */
	buf[FM3130_RTC_SECONDS] = bin2bcd(t->tm_sec);
	buf[FM3130_RTC_MINUTES] = bin2bcd(t->tm_min);
	buf[FM3130_RTC_HOURS] = bin2bcd(t->tm_hour);
	buf[FM3130_RTC_DAY] = bin2bcd(t->tm_wday + 1);	/* chip: 1..7 */
	buf[FM3130_RTC_DATE] = bin2bcd(t->tm_mday);
	buf[FM3130_RTC_MONTHS] = bin2bcd(t->tm_mon + 1);	/* chip: 1..12 */

	/* assume 20YY not 19YY; tm_year counts from 1900 */
	tmp = t->tm_year - 100;
	buf[FM3130_RTC_YEARS] = bin2bcd(tmp);

	dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x"
		"%02x %02x %02x %02x %02x %02x %02x %02x\n",
		"write", buf[0], buf[1], buf[2], buf[3],
		buf[4], buf[5], buf[6], buf[7],
		buf[8], buf[9], buf[0xa], buf[0xb],
		buf[0xc], buf[0xd], buf[0xe]);

	fm3130_rtc_mode(dev, FM3130_MODE_WRITE);

	/* Writing time registers, we don't support multibyte transfers */
	for (i = 0; i < FM3130_CLOCK_REGS; i++) {
		i2c_smbus_write_byte_data(fm3130->client,
					FM3130_RTC_SECONDS + i,
					fm3130->regs[FM3130_RTC_SECONDS + i]);
	}

	/* Leaving WRITE mode commits the shadow registers to the clock. */
	fm3130_rtc_mode(dev, FM3130_MODE_NORMAL);

	/* We assume here that data are valid once written */
	if (!fm3130->data_valid)
		fm3130->data_valid = 1;

	return 0;
}
/*
 * Read the alarm time and enabled state from the FM3130.
 *
 * Uses the combined alarm-read transfer prepared in probe (msg[2]/msg[3])
 * to fetch all alarm registers, decodes them into alrm->time, then reads
 * the control register to report whether the alarm is enabled (AEN set
 * while calibration mode is off).
 *
 * Returns 0 on success, -EIO while alarm data is known invalid or on I2C
 * failure.  Year and weekday are not stored by the alarm registers and
 * are left untouched.
 */
static int fm3130_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct fm3130 *fm3130 = dev_get_drvdata(dev);
	int tmp;
	struct rtc_time *tm = &alrm->time;

	if (!fm3130->alarm_valid) {
		/*
		 * We have invalid alarm in RTC, probably due to battery faults
		 * or other problems. Return EIO for now, it will allow us to
		 * set alarm value later instead of error during probing which
		 * disables device
		 */
		return -EIO;
	}

	/* read the RTC alarm registers all at once */
	tmp = i2c_transfer(to_i2c_adapter(fm3130->client->dev.parent),
			&fm3130->msg[2], 2);
	if (tmp != 2) {
		dev_err(dev, "%s error %d\n", "read", tmp);
		return -EIO;
	}

	dev_dbg(dev, "alarm read %02x %02x %02x %02x %02x\n",
		fm3130->regs[FM3130_ALARM_SECONDS],
		fm3130->regs[FM3130_ALARM_MINUTES],
		fm3130->regs[FM3130_ALARM_HOURS],
		fm3130->regs[FM3130_ALARM_DATE],
		fm3130->regs[FM3130_ALARM_MONTHS]);

	/* Mask off match-disable/flag bits before BCD decoding. */
	tm->tm_sec = bcd2bin(fm3130->regs[FM3130_ALARM_SECONDS] & 0x7F);
	tm->tm_min = bcd2bin(fm3130->regs[FM3130_ALARM_MINUTES] & 0x7F);
	tm->tm_hour = bcd2bin(fm3130->regs[FM3130_ALARM_HOURS] & 0x3F);
	tm->tm_mday = bcd2bin(fm3130->regs[FM3130_ALARM_DATE] & 0x3F);
	tm->tm_mon = bcd2bin(fm3130->regs[FM3130_ALARM_MONTHS] & 0x1F);

	if (tm->tm_mon > 0)
		tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */

	dev_dbg(dev, "%s secs=%d, mins=%d, "
		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
		"read alarm", tm->tm_sec, tm->tm_min,
		tm->tm_hour, tm->tm_mday,
		tm->tm_mon, tm->tm_year, tm->tm_wday);

	/* check if alarm enabled: AEN set and calibration mode off */
	fm3130->regs[FM3130_RTC_CONTROL] =
		i2c_smbus_read_byte_data(fm3130->client, FM3130_RTC_CONTROL);

	if ((fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_AEN) &&
	    (~fm3130->regs[FM3130_RTC_CONTROL] &
		FM3130_RTC_CONTROL_BIT_CAL)) {
		alrm->enabled = 1;
	}

	return 0;
}
/*
 * Program the alarm time and enabled state.
 *
 * Fields set to -1 become 0x80, which per the register encoding used
 * here disables matching on that field (wildcard).  Registers are
 * written one SMBus byte at a time, then the AEN bit in the control
 * register is set or cleared according to alrm->enabled (calibration
 * mode is always forced off in the same write).
 *
 * Always returns 0; individual SMBus write errors are not checked.
 */
static int fm3130_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct fm3130 *fm3130 = dev_get_drvdata(dev);
	struct rtc_time *tm = &alrm->time;
	int i;

	dev_dbg(dev, "%s secs=%d, mins=%d, "
		"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
		"write alarm", tm->tm_sec, tm->tm_min,
		tm->tm_hour, tm->tm_mday,
		tm->tm_mon, tm->tm_year, tm->tm_wday);

	/* -1 means "don't match this field": encode as 0x80 */
	fm3130->regs[FM3130_ALARM_SECONDS] =
		(tm->tm_sec != -1) ? bin2bcd(tm->tm_sec) : 0x80;

	fm3130->regs[FM3130_ALARM_MINUTES] =
		(tm->tm_min != -1) ? bin2bcd(tm->tm_min) : 0x80;

	fm3130->regs[FM3130_ALARM_HOURS] =
		(tm->tm_hour != -1) ? bin2bcd(tm->tm_hour) : 0x80;

	fm3130->regs[FM3130_ALARM_DATE] =
		(tm->tm_mday != -1) ? bin2bcd(tm->tm_mday) : 0x80;

	fm3130->regs[FM3130_ALARM_MONTHS] =
		(tm->tm_mon != -1) ? bin2bcd(tm->tm_mon + 1) : 0x80;

	dev_dbg(dev, "alarm write %02x %02x %02x %02x %02x\n",
		fm3130->regs[FM3130_ALARM_SECONDS],
		fm3130->regs[FM3130_ALARM_MINUTES],
		fm3130->regs[FM3130_ALARM_HOURS],
		fm3130->regs[FM3130_ALARM_DATE],
		fm3130->regs[FM3130_ALARM_MONTHS]);

	/* Writing time registers, we don't support multibyte transfers */
	for (i = 0; i < FM3130_ALARM_REGS; i++) {
		i2c_smbus_write_byte_data(fm3130->client,
					FM3130_ALARM_SECONDS + i,
					fm3130->regs[FM3130_ALARM_SECONDS + i]);
	}

	fm3130->regs[FM3130_RTC_CONTROL] =
		i2c_smbus_read_byte_data(fm3130->client, FM3130_RTC_CONTROL);

	/* enable or disable alarm; calibration mode is cleared either way */
	if (alrm->enabled) {
		i2c_smbus_write_byte_data(fm3130->client, FM3130_RTC_CONTROL,
			(fm3130->regs[FM3130_RTC_CONTROL] &
				~(FM3130_RTC_CONTROL_BIT_CAL)) |
					FM3130_RTC_CONTROL_BIT_AEN);
	} else {
		i2c_smbus_write_byte_data(fm3130->client, FM3130_RTC_CONTROL,
			fm3130->regs[FM3130_RTC_CONTROL] &
				~(FM3130_RTC_CONTROL_BIT_CAL) &
				~(FM3130_RTC_CONTROL_BIT_AEN));
	}

	/* We assume here that data is valid once written */
	if (!fm3130->alarm_valid)
		fm3130->alarm_valid = 1;

	return 0;
}
static int fm3130_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct fm3130 *fm3130 = dev_get_drvdata(dev);
int ret = 0;
fm3130->regs[FM3130_RTC_CONTROL] =
i2c_smbus_read_byte_data(fm3130->client, FM3130_RTC_CONTROL);
dev_dbg(dev, "alarm_irq_enable: enable=%d, FM3130_RTC_CONTROL=%02x\n",
enabled, fm3130->regs[FM3130_RTC_CONTROL]);
switch (enabled) {
case 0: /* alarm off */
ret = i2c_smbus_write_byte_data(fm3130->client,
FM3130_RTC_CONTROL, fm3130->regs[FM3130_RTC_CONTROL] &
~(FM3130_RTC_CONTROL_BIT_CAL) &
~(FM3130_RTC_CONTROL_BIT_AEN));
break;
case 1: /* alarm on */
ret = i2c_smbus_write_byte_data(fm3130->client,
FM3130_RTC_CONTROL, (fm3130->regs[FM3130_RTC_CONTROL] &
~(FM3130_RTC_CONTROL_BIT_CAL)) |
FM3130_RTC_CONTROL_BIT_AEN);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/* RTC class operations implemented by this driver. */
static const struct rtc_class_ops fm3130_rtc_ops = {
	.read_time	= fm3130_get_time,
	.set_time	= fm3130_set_time,
	.read_alarm	= fm3130_read_alarm,
	.set_alarm	= fm3130_set_alarm,
	.alarm_irq_enable = fm3130_alarm_irq_enable,
};

/* Forward declaration; the full driver structure is defined below. */
static struct i2c_driver fm3130_driver;
/*
 * Probe the FM3130: prepare the combined time/alarm read messages,
 * clean up stray control-register state (calibration, READ/WRITE modes,
 * low-battery and power-on-reset flags), sanity-check the alarm and
 * clock register contents, and register the RTC class device.
 *
 * Bogus clock/alarm contents do NOT fail the probe: they only leave
 * data_valid/alarm_valid clear, so reads return -EIO until new values
 * are written via set_time/set_alarm.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit fm3130_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	struct fm3130 *fm3130;
	int err = -ENODEV;
	int tmp;
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);

	if (!i2c_check_functionality(adapter,
			I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
		return -EIO;

	fm3130 = kzalloc(sizeof(struct fm3130), GFP_KERNEL);

	if (!fm3130)
		return -ENOMEM;

	fm3130->client = client;
	i2c_set_clientdata(client, fm3130);

	fm3130->reg_addr_time = FM3130_RTC_SECONDS;
	fm3130->reg_addr_alarm = FM3130_ALARM_SECONDS;

	/* Messages to read time: register-pointer write, then block read */
	fm3130->msg[0].addr = client->addr;
	fm3130->msg[0].flags = 0;
	fm3130->msg[0].len = 1;
	fm3130->msg[0].buf = &fm3130->reg_addr_time;

	fm3130->msg[1].addr = client->addr;
	fm3130->msg[1].flags = I2C_M_RD;
	fm3130->msg[1].len = FM3130_CLOCK_REGS;
	fm3130->msg[1].buf = &fm3130->regs[FM3130_RTC_SECONDS];

	/* Messages to read alarm */
	fm3130->msg[2].addr = client->addr;
	fm3130->msg[2].flags = 0;
	fm3130->msg[2].len = 1;
	fm3130->msg[2].buf = &fm3130->reg_addr_alarm;

	fm3130->msg[3].addr = client->addr;
	fm3130->msg[3].flags = I2C_M_RD;
	fm3130->msg[3].len = FM3130_ALARM_REGS;
	fm3130->msg[3].buf = &fm3130->regs[FM3130_ALARM_SECONDS];

	fm3130->alarm_valid = 0;
	fm3130->data_valid = 0;

	/* Initial snapshot of both the clock and alarm register windows. */
	tmp = i2c_transfer(adapter, fm3130->msg, 4);
	if (tmp != 4) {
		pr_debug("read error %d\n", tmp);
		err = -EIO;
		goto exit_free;
	}

	fm3130->regs[FM3130_RTC_CONTROL] =
		i2c_smbus_read_byte_data(client, FM3130_RTC_CONTROL);
	fm3130->regs[FM3130_CAL_CONTROL] =
		i2c_smbus_read_byte_data(client, FM3130_CAL_CONTROL);

	/* Disabling calibration mode */
	if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) {
		i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
			fm3130->regs[FM3130_RTC_CONTROL] &
				~(FM3130_RTC_CONTROL_BIT_CAL));
		dev_warn(&client->dev, "Disabling calibration mode!\n");
	}

	/* Disabling read and write modes */
	if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE ||
	    fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) {
		i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
			fm3130->regs[FM3130_RTC_CONTROL] &
				~(FM3130_RTC_CONTROL_BIT_READ |
					FM3130_RTC_CONTROL_BIT_WRITE));
		dev_warn(&client->dev, "Disabling READ or WRITE mode!\n");
	}

	/* oscillator off? turn it on, so clock can tick. */
	if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN)
		i2c_smbus_write_byte_data(client, FM3130_CAL_CONTROL,
			fm3130->regs[FM3130_CAL_CONTROL] &
				~(FM3130_CAL_CONTROL_BIT_nOSCEN));

	/* low battery? clear flag, and warn */
	if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_LB) {
		i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
			fm3130->regs[FM3130_RTC_CONTROL] &
				~(FM3130_RTC_CONTROL_BIT_LB));
		dev_warn(&client->dev, "Low battery!\n");
	}

	/* check if Power On Reset bit is set */
	if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_POR) {
		i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
			fm3130->regs[FM3130_RTC_CONTROL] &
				~FM3130_RTC_CONTROL_BIT_POR);
		dev_dbg(&client->dev, "POR bit is set\n");
	}
	/* ACS is controlled by alarm */
	i2c_smbus_write_byte_data(client, FM3130_ALARM_WP_CONTROL, 0x80);

	/*
	 * Alarm registers sanity check.
	 *
	 * Fix: this used to look at the FM3130_RTC_* (clock) registers,
	 * so alarm_valid was decided from clock contents and bogus alarm
	 * data went undetected.  Check the actual alarm registers, with
	 * the same field masks fm3130_read_alarm() uses.
	 */
	tmp = bcd2bin(fm3130->regs[FM3130_ALARM_SECONDS] & 0x7f);
	if (tmp > 59)
		goto bad_alarm;

	tmp = bcd2bin(fm3130->regs[FM3130_ALARM_MINUTES] & 0x7f);
	if (tmp > 59)
		goto bad_alarm;

	tmp = bcd2bin(fm3130->regs[FM3130_ALARM_HOURS] & 0x3f);
	if (tmp > 23)
		goto bad_alarm;

	tmp = bcd2bin(fm3130->regs[FM3130_ALARM_DATE] & 0x3f);
	if (tmp == 0 || tmp > 31)
		goto bad_alarm;

	tmp = bcd2bin(fm3130->regs[FM3130_ALARM_MONTHS] & 0x1f);
	if (tmp == 0 || tmp > 12)
		goto bad_alarm;

	fm3130->alarm_valid = 1;

bad_alarm:

	/* clock registers sanity check */
	tmp = bcd2bin(fm3130->regs[FM3130_RTC_SECONDS] & 0x7f);
	if (tmp > 59)
		goto bad_clock;

	tmp = bcd2bin(fm3130->regs[FM3130_RTC_MINUTES] & 0x7f);
	if (tmp > 59)
		goto bad_clock;

	tmp = bcd2bin(fm3130->regs[FM3130_RTC_HOURS] & 0x3f);
	if (tmp > 23)
		goto bad_clock;

	tmp = bcd2bin(fm3130->regs[FM3130_RTC_DAY] & 0x7);
	if (tmp == 0 || tmp > 7)
		goto bad_clock;

	tmp = bcd2bin(fm3130->regs[FM3130_RTC_DATE] & 0x3f);
	if (tmp == 0 || tmp > 31)
		goto bad_clock;

	tmp = bcd2bin(fm3130->regs[FM3130_RTC_MONTHS] & 0x1f);
	if (tmp == 0 || tmp > 12)
		goto bad_clock;

	fm3130->data_valid = 1;

bad_clock:

	if (!fm3130->data_valid || !fm3130->alarm_valid)
		dev_dbg(&client->dev,
			"%s: %02x %02x %02x %02x %02x %02x %02x %02x"
			"%02x %02x %02x %02x %02x %02x %02x\n",
			"bogus registers",
			fm3130->regs[0], fm3130->regs[1],
			fm3130->regs[2], fm3130->regs[3],
			fm3130->regs[4], fm3130->regs[5],
			fm3130->regs[6], fm3130->regs[7],
			fm3130->regs[8], fm3130->regs[9],
			fm3130->regs[0xa], fm3130->regs[0xb],
			fm3130->regs[0xc], fm3130->regs[0xd],
			fm3130->regs[0xe]);

	/* We won't bail out here because we just got invalid data.
	   Time setting from u-boot doesn't work anyway */

	fm3130->rtc = rtc_device_register(client->name, &client->dev,
				&fm3130_rtc_ops, THIS_MODULE);
	if (IS_ERR(fm3130->rtc)) {
		err = PTR_ERR(fm3130->rtc);
		dev_err(&client->dev,
			"unable to register the class device\n");
		goto exit_free;
	}

	return 0;

exit_free:
	kfree(fm3130);
	return err;
}
/* Unregister the RTC class device and free the driver state. */
static int __devexit fm3130_remove(struct i2c_client *client)
{
	struct fm3130 *fm3130 = i2c_get_clientdata(client);

	rtc_device_unregister(fm3130->rtc);
	kfree(fm3130);
	return 0;
}
/* I2C driver binding; fm3130_id is defined earlier in this file. */
static struct i2c_driver fm3130_driver = {
	.driver = {
		.name	= "rtc-fm3130",
		.owner	= THIS_MODULE,
	},
	.probe		= fm3130_probe,
	.remove		= __devexit_p(fm3130_remove),
	.id_table	= fm3130_id,
};

module_i2c_driver(fm3130_driver);

MODULE_DESCRIPTION("RTC driver for FM3130");
MODULE_AUTHOR("Sergey Lapin <slapin@ossfans.org>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Electrex/Electroactive-N5 | drivers/spi/spi-ti-ssp.c | 5238 | 9381 | /*
* Sequencer Serial Port (SSP) based SPI master driver
*
* Copyright (C) 2010 Texas Instruments Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/mfd/ti_ssp.h>
/* SPI mode bits this master supports. */
#define MODE_BITS	(SPI_CPHA | SPI_CPOL | SPI_CS_HIGH)

/* Per-master state for the SSP-based SPI controller. */
struct ti_ssp_spi {
	struct spi_master		*master;
	struct device			*dev;		/* underlying SSP device */
	spinlock_t			lock;		/* guards msg_queue/shutdown */
	struct list_head		msg_queue;	/* messages awaiting the worker */
	struct completion		complete;	/* signalled when queue drains at shutdown */
	bool				shutdown;	/* set by remove(); new transfers get -ESHUTDOWN */
	struct workqueue_struct		*workqueue;
	struct work_struct		work;

	u8				mode, bpw;	/* currently-loaded transfer config */
	int				cs_active;	/* current chip-select state (0/1) */
	/* sequencer program counters: CS enable/disable, write, read */
	u32				pc_en, pc_dis, pc_wr, pc_rd;
	void				(*select)(int cs); /* board hook for CS decoding */
};
/* Run the sequencer "read" program and return the shifted-in word. */
static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw)
{
	u32 data;

	ti_ssp_run(hw->dev, hw->pc_rd, 0, &data);

	return data;
}
/* Left-justify the word to the 32-bit shifter and run the "write" program. */
static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data)
{
	u32 justified = data << (32 - hw->bpw);

	ti_ssp_run(hw->dev, hw->pc_wr, justified, NULL);
}
/*
 * Move one spi_transfer through the port, a word at a time.  Word size
 * is 1, 2 or 4 bytes depending on the configured bits-per-word; t->len
 * is in bytes.  Updates msg->actual_length and returns 0 on a full
 * transfer, -EIO if data was left over.
 *
 * NOTE(review): for bpw > 8, a t->len that is not a multiple of the
 * word size makes `count` overshoot t->len — presumably callers only
 * submit word-aligned lengths; confirm.
 */
static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg,
		       struct spi_transfer *t)
{
	int count;

	if (hw->bpw <= 8) {
		u8		*rx = t->rx_buf;
		const u8	*tx = t->tx_buf;

		for (count = 0; count < t->len; count += 1) {
			if (t->tx_buf)
				ti_ssp_spi_tx(hw, *tx++);
			if (t->rx_buf)
				*rx++ = ti_ssp_spi_rx(hw);
		}
	} else if (hw->bpw <= 16) {
		u16		*rx = t->rx_buf;
		const u16	*tx = t->tx_buf;

		for (count = 0; count < t->len; count += 2) {
			if (t->tx_buf)
				ti_ssp_spi_tx(hw, *tx++);
			if (t->rx_buf)
				*rx++ = ti_ssp_spi_rx(hw);
		}
	} else {
		u32		*rx = t->rx_buf;
		const u32	*tx = t->tx_buf;

		for (count = 0; count < t->len; count += 4) {
			if (t->tx_buf)
				ti_ssp_spi_tx(hw, *tx++);
			if (t->rx_buf)
				*rx++ = ti_ssp_spi_rx(hw);
		}
	}

	msg->actual_length += count; /* bytes transferred */

	dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n",
		t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len,
		hw->bpw, count, (count < t->len) ? " (under)" : "");

	return (count < t->len) ? -EIO : 0; /* left over data */
}
/*
 * Drive chip select to the requested state, running the CS enable or
 * disable sequencer program only when the state actually changes.
 */
static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active)
{
	int want = !!cs_active;

	if (want == hw->cs_active)
		return;

	hw->cs_active = want;
	ti_ssp_run(hw->dev, want ? hw->pc_en : hw->pc_dis, 0, NULL);
}
/* Each bit is clocked in two half-periods, hence the (bits * 2 - 1) count. */
#define __SHIFT_OUT(bits)	(SSP_OPCODE_SHIFT | SSP_OUT_MODE | \
				 cs_en | clk | SSP_COUNT((bits) * 2 - 1))
#define __SHIFT_IN(bits)	(SSP_OPCODE_SHIFT | SSP_IN_MODE  | \
				 cs_en | clk | SSP_COUNT((bits) * 2 - 1))

/*
 * (Re)build and load the SSP sequencer program for the given word width
 * and SPI mode, recording the program counters of the CS-disable,
 * CS-enable, write and read entry points in *hw.  A no-op when the
 * cached mode/bpw already match.  Returns 0 or a negative errno.
 */
static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode)
{
	int error, idx = 0;
	u32 seqram[16];
	u32 cs_en, cs_dis, clk;
	u32 topbits, botbits;

	mode &= MODE_BITS;
	if (mode == hw->mode && bpw == hw->bpw)
		return 0;

	cs_en  = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW;
	cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW  : SSP_CS_HIGH;
	clk    = (mode & SPI_CPOL)    ? SSP_CLK_HIGH : SSP_CLK_LOW;

	/* Construct instructions */

	/* Disable Chip Select */
	hw->pc_dis = idx;
	seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk;
	seqram[idx++] = SSP_OPCODE_STOP   | SSP_OUT_MODE | cs_dis | clk;

	/* Enable Chip Select */
	hw->pc_en = idx;
	seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk;
	seqram[idx++] = SSP_OPCODE_STOP   | SSP_OUT_MODE | cs_en | clk;

	/* Reads and writes need to be split for bpw > 16 */
	topbits = (bpw > 16) ? 16 : bpw;
	botbits = bpw - topbits;

	/* Write */
	hw->pc_wr = idx;
	seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG;
	if (botbits)
		seqram[idx++] = __SHIFT_OUT(botbits)  | SSP_DATA_REG;
	seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;

	/* Read */
	hw->pc_rd = idx;
	if (botbits)
		seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG;
	seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG;
	seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk;

	error = ti_ssp_load(hw->dev, 0, seqram, idx);
	if (error < 0)
		return error;

	/* CPHA selects whether data is sampled early or late */
	error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ?
					  0 : SSP_EARLY_DIN));
	if (error < 0)
		return error;

	hw->bpw = bpw;
	hw->mode = mode;

	return error;
}
/*
 * Worker: drain the message queue.  The lock is held only while
 * manipulating the queue; it is dropped around the actual hardware
 * transfer and re-taken to pick the next message.  Once the queue is
 * empty and shutdown is set, the remove() path is woken via `complete`.
 *
 * NOTE(review): a ti_ssp_spi_setup_transfer() failure breaks out of the
 * transfer loop but leaves `status` at 0, so the message completes as
 * success — verify that this is intended.
 */
static void ti_ssp_spi_work(struct work_struct *work)
{
	struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work);

	spin_lock(&hw->lock);

	while (!list_empty(&hw->msg_queue)) {
		struct spi_message	*m;
		struct spi_device	*spi;
		struct spi_transfer	*t = NULL;
		int			status = 0;

		m = container_of(hw->msg_queue.next, struct spi_message,
				 queue);

		list_del_init(&m->queue);

		spin_unlock(&hw->lock);

		spi = m->spi;

		if (hw->select)
			hw->select(spi->chip_select);

		list_for_each_entry(t, &m->transfers, transfer_list) {
			int bpw = spi->bits_per_word;
			int xfer_status;

			if (t->bits_per_word)
				bpw = t->bits_per_word;

			if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0)
				break;

			ti_ssp_spi_chip_select(hw, 1);

			xfer_status = ti_ssp_spi_txrx(hw, m, t);
			if (xfer_status < 0)
				status = xfer_status;

			if (t->delay_usecs)
				udelay(t->delay_usecs);

			if (t->cs_change)
				ti_ssp_spi_chip_select(hw, 0);
		}

		ti_ssp_spi_chip_select(hw, 0);
		m->status = status;
		m->complete(m->context);

		spin_lock(&hw->lock);
	}

	if (hw->shutdown)
		complete(&hw->complete);

	spin_unlock(&hw->lock);
}
/* Per-device setup hook: the SSP shifter handles at most 32 bits per word. */
static int ti_ssp_spi_setup(struct spi_device *spi)
{
	return (spi->bits_per_word > 32) ? -EINVAL : 0;
}
/*
 * spi_master .transfer hook: validate the message and queue it for the
 * worker.  Only half-duplex transfers with a single buffer and word
 * widths up to 32 bits are accepted.  Returns 0 once queued, -EINVAL
 * on a malformed message, or -ESHUTDOWN while the driver is going away.
 */
static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct ti_ssp_spi	*hw;
	struct spi_transfer	*t;
	int			error = 0;

	m->actual_length = 0;
	m->status = -EINPROGRESS;

	hw = spi_master_get_devdata(spi->master);

	if (list_empty(&m->transfers) || !m->complete)
		return -EINVAL;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->len && !(t->rx_buf || t->tx_buf)) {
			dev_err(&spi->dev, "invalid xfer, no buffer\n");
			return -EINVAL;
		}

		/* hardware is half-duplex: exactly one direction per xfer */
		if (t->len && t->rx_buf && t->tx_buf) {
			dev_err(&spi->dev, "invalid xfer, full duplex\n");
			return -EINVAL;
		}

		if (t->bits_per_word > 32) {
			dev_err(&spi->dev, "invalid xfer width %d\n",
				t->bits_per_word);
			return -EINVAL;
		}
	}

	spin_lock(&hw->lock);
	if (hw->shutdown) {
		error = -ESHUTDOWN;
		goto error_unlock;
	}
	list_add_tail(&m->queue, &hw->msg_queue);
	queue_work(hw->workqueue, &hw->work);
error_unlock:
	spin_unlock(&hw->lock);
	return error;
}
/*
 * Probe: allocate the SPI master, set up the message queue and single-
 * threaded worker, configure the SSP I/O selection from platform data,
 * and register the master.  Returns 0 or a negative errno, unwinding
 * all partial setup on failure.
 */
static int __devinit ti_ssp_spi_probe(struct platform_device *pdev)
{
	const struct ti_ssp_spi_data *pdata;
	struct ti_ssp_spi *hw;
	struct spi_master *master;
	struct device *dev = &pdev->dev;
	int error = 0;

	pdata = dev->platform_data;
	if (!pdata) {
		dev_err(dev, "platform data not found\n");
		return -EINVAL;
	}

	master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi));
	if (!master) {
		dev_err(dev, "cannot allocate SPI master\n");
		return -ENOMEM;
	}

	hw = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, hw);

	hw->master = master;
	hw->dev = dev;
	hw->select = pdata->select;

	spin_lock_init(&hw->lock);
	init_completion(&hw->complete);
	INIT_LIST_HEAD(&hw->msg_queue);
	INIT_WORK(&hw->work, ti_ssp_spi_work);

	/* single-threaded: messages are processed strictly in order */
	hw->workqueue = create_singlethread_workqueue(dev_name(dev));
	if (!hw->workqueue) {
		error = -ENOMEM;
		dev_err(dev, "work queue creation failed\n");
		goto error_wq;
	}

	error = ti_ssp_set_iosel(hw->dev, pdata->iosel);
	if (error < 0) {
		dev_err(dev, "io setup failed\n");
		goto error_iosel;
	}

	master->bus_num		= pdev->id;
	master->num_chipselect	= pdata->num_cs;
	master->mode_bits	= MODE_BITS;
	master->flags		= SPI_MASTER_HALF_DUPLEX;
	master->setup		= ti_ssp_spi_setup;
	master->transfer	= ti_ssp_spi_transfer;

	error = spi_register_master(master);
	if (error) {
		dev_err(dev, "master registration failed\n");
		goto error_reg;
	}

	return 0;

error_reg:
error_iosel:
	destroy_workqueue(hw->workqueue);
error_wq:
	spi_master_put(master);
	return error;
}
/*
 * Remove: flag shutdown, wait for the worker to drain any queued
 * messages, then tear down the workqueue and unregister the master.
 *
 * NOTE(review): `shutdown` and `msg_queue` are accessed here without
 * taking hw->lock, so this is racy against the worker and transfer
 * paths — confirm whether upper layers serialize remove sufficiently.
 */
static int __devexit ti_ssp_spi_remove(struct platform_device *pdev)
{
	struct ti_ssp_spi *hw = platform_get_drvdata(pdev);
	int error;

	hw->shutdown = 1;
	while (!list_empty(&hw->msg_queue)) {
		error = wait_for_completion_interruptible(&hw->complete);
		if (error < 0) {
			/* interrupted: cancel the shutdown and bail out */
			hw->shutdown = 0;
			return error;
		}
	}
	destroy_workqueue(hw->workqueue);
	spi_unregister_master(hw->master);

	return 0;
}
/* Platform driver binding for the "ti-ssp-spi" device. */
static struct platform_driver ti_ssp_spi_driver = {
	.probe		= ti_ssp_spi_probe,
	.remove		= __devexit_p(ti_ssp_spi_remove),
	.driver		= {
		.name	= "ti-ssp-spi",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(ti_ssp_spi_driver);

MODULE_DESCRIPTION("SSP SPI Master");
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ti-ssp-spi");
| gpl-2.0 |
oppo-source/Find5-4.2-kernel-source | arch/arm/plat-mxc/devices/platform-ipu-core.c | 5494 | 3161 | /*
* Copyright (C) 2011 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
#include <linux/dma-mapping.h>
#include <mach/hardware.h>
#include <mach/devices-common.h>
/* Build the per-SoC IPU description: register base, sync IRQ, error IRQ. */
#define imx_ipu_core_entry_single(soc) \
	{ \
		.iobase = soc ## _IPU_CTRL_BASE_ADDR, \
		.synirq = soc ## _INT_IPU_SYN, \
		.errirq = soc ## _INT_IPU_ERR, \
	}

#ifdef CONFIG_SOC_IMX31
const struct imx_ipu_core_data imx31_ipu_core_data __initconst =
	imx_ipu_core_entry_single(MX31);
#endif

#ifdef CONFIG_SOC_IMX35
const struct imx_ipu_core_data imx35_ipu_core_data __initconst =
	imx_ipu_core_entry_single(MX35);
#endif

/* Cached ipu-core device; the camera/fb helpers below use it as DMA provider. */
static struct platform_device *imx_ipu_coredev __initdata;
/*
 * Register the shared "ipu-core" platform device covering the IPU
 * control register windows and its sync/error interrupts, and cache it
 * in imx_ipu_coredev for the camera and framebuffer helpers.
 */
struct platform_device *__init imx_add_ipu_core(
		const struct imx_ipu_core_data *data,
		const struct ipu_platform_data *pdata)
{
	/* The resource order is important! */
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + 0x5f,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->iobase + 0x88,
			.end = data->iobase + 0xb3,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->synirq,
			.end = data->synirq,
			.flags = IORESOURCE_IRQ,
		}, {
			.start = data->errirq,
			.end = data->errirq,
			.flags = IORESOURCE_IRQ,
		},
	};

	return imx_ipu_coredev = imx_add_platform_device("ipu-core", -1,
			res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
}
/*
 * Allocate (but do not add) the "mx3-camera" platform device using the
 * CSI register window of the IPU.  Requires imx_add_ipu_core() to have
 * succeeded first, since the camera DMAs through the ipu-core device.
 *
 * Returns the device or an ERR_PTR.
 *
 * Fixes over the previous version:
 *  - a failed platform_device_alloc() used to jump to the common error
 *    path, which dereferenced the NULL pdev via pdev->dev.dma_mask;
 *  - the error path returned a hard-coded -ENODEV instead of the actual
 *    failure code collected in `ret`.
 */
struct platform_device *__init imx_alloc_mx3_camera(
		const struct imx_ipu_core_data *data,
		const struct mx3_camera_pdata *pdata)
{
	struct resource res[] = {
		{
			.start = data->iobase + 0x60,
			.end = data->iobase + 0x87,
			.flags = IORESOURCE_MEM,
		},
	};
	int ret = -ENOMEM;
	struct platform_device *pdev;

	if (IS_ERR_OR_NULL(imx_ipu_coredev))
		return ERR_PTR(-ENODEV);

	pdev = platform_device_alloc("mx3-camera", 0);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
	if (!pdev->dev.dma_mask)
		goto err;

	*pdev->dev.dma_mask = DMA_BIT_MASK(32);
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
	if (ret)
		goto err;

	if (pdata) {
		struct mx3_camera_pdata *copied_pdata;

		ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
		if (ret)
			goto err;
		copied_pdata = dev_get_platdata(&pdev->dev);
		/* camera DMAs through the shared ipu-core device */
		copied_pdata->dma_dev = &imx_ipu_coredev->dev;
	}

	return pdev;

err:
	kfree(pdev->dev.dma_mask);
	platform_device_put(pdev);
	return ERR_PTR(ret);
}
/*
 * Register the "mx3_sdc_fb" framebuffer device over the SDC register
 * window of the IPU.  Requires imx_add_ipu_core() to have succeeded
 * first; the framebuffer DMAs through the shared ipu-core device.
 */
struct platform_device *__init imx_add_mx3_sdc_fb(
		const struct imx_ipu_core_data *data,
		struct mx3fb_platform_data *pdata)
{
	struct resource res[] = {
		{
			.start = data->iobase + 0xb4,
			.end = data->iobase + 0x1bf,
			.flags = IORESOURCE_MEM,
		},
	};

	if (IS_ERR_OR_NULL(imx_ipu_coredev))
		return ERR_PTR(-ENODEV);

	pdata->dma_dev = &imx_ipu_coredev->dev;

	return imx_add_platform_device_dmamask("mx3_sdc_fb", -1,
			res, ARRAY_SIZE(res), pdata, sizeof(*pdata),
			DMA_BIT_MASK(32));
}
| gpl-2.0 |
MinimalOS-AOSP/kernel_lge_hammerhead | drivers/net/wireless/iwlegacy/3945-debug.c | 7542 | 19561 | /******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include "common.h"
#include "3945.h"
static int
il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
{
int p = 0;
p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
le32_to_cpu(il->_3945.stats.flag));
if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
p += scnprintf(buf + p, bufsz - p,
"\tStatistics have been cleared\n");
p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
(le32_to_cpu(il->_3945.stats.flag) &
UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
(le32_to_cpu(il->_3945.stats.flag) &
UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
return p;
}
/*
 * Emit one counter row: label, current value (little-endian register),
 * and the accumulated / delta / max values tracked by the driver.
 * Relies on the local naming convention <set>, accum_<set>, delta_<set>
 * and max_<set> used below.
 */
#define IL3945_PRINT_RX_STAT(label, set, field)				\
	(pos += scnprintf(buf + pos, bufsz - pos,			\
			  " %-30s %10u %10u %10u %10u\n", label,	\
			  le32_to_cpu(set->field),			\
			  accum_##set->field,				\
			  delta_##set->field,				\
			  max_##set->field))

/*
 * debugfs read handler: dump the uCode Rx statistics (OFDM PHY, CCK PHY
 * and general counters) as a text table of current / accumulative /
 * delta / max values.  Returns bytes copied to user space or a negative
 * errno (-EAGAIN before the device is alive, -ENOMEM on allocation
 * failure).
 */
ssize_t
il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	int bufsz =
	    sizeof(struct iwl39_stats_rx_phy) * 40 +
	    sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
	ssize_t ret;
	struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
	struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
	struct iwl39_stats_rx_non_phy *general, *accum_general;
	struct iwl39_stats_rx_non_phy *delta_general, *max_general;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/*
	 * The statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	ofdm = &il->_3945.stats.rx.ofdm;
	cck = &il->_3945.stats.rx.cck;
	general = &il->_3945.stats.rx.general;
	accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
	accum_cck = &il->_3945.accum_stats.rx.cck;
	accum_general = &il->_3945.accum_stats.rx.general;
	delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
	delta_cck = &il->_3945.delta_stats.rx.cck;
	delta_general = &il->_3945.delta_stats.rx.general;
	max_ofdm = &il->_3945.max_delta.rx.ofdm;
	max_cck = &il->_3945.max_delta.rx.cck;
	max_general = &il->_3945.max_delta.rx.general;

	pos += il3945_stats_flag(il, buf, bufsz);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "%-32s current"
			 "acumulative delta max\n",
			 "Statistics_Rx - OFDM:");
	IL3945_PRINT_RX_STAT("ina_cnt:", ofdm, ina_cnt);
	IL3945_PRINT_RX_STAT("fina_cnt:", ofdm, fina_cnt);
	IL3945_PRINT_RX_STAT("plcp_err:", ofdm, plcp_err);
	IL3945_PRINT_RX_STAT("crc32_err:", ofdm, crc32_err);
	IL3945_PRINT_RX_STAT("overrun_err:", ofdm, overrun_err);
	IL3945_PRINT_RX_STAT("early_overrun_err:", ofdm, early_overrun_err);
	IL3945_PRINT_RX_STAT("crc32_good:", ofdm, crc32_good);
	IL3945_PRINT_RX_STAT("false_alarm_cnt:", ofdm, false_alarm_cnt);
	IL3945_PRINT_RX_STAT("fina_sync_err_cnt:", ofdm, fina_sync_err_cnt);
	IL3945_PRINT_RX_STAT("sfd_timeout:", ofdm, sfd_timeout);
	IL3945_PRINT_RX_STAT("fina_timeout:", ofdm, fina_timeout);
	IL3945_PRINT_RX_STAT("unresponded_rts:", ofdm, unresponded_rts);
	IL3945_PRINT_RX_STAT("rxe_frame_lmt_ovrun:", ofdm,
			     rxe_frame_limit_overrun);
	IL3945_PRINT_RX_STAT("sent_ack_cnt:", ofdm, sent_ack_cnt);
	IL3945_PRINT_RX_STAT("sent_cts_cnt:", ofdm, sent_cts_cnt);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "%-32s current"
			 "acumulative delta max\n",
			 "Statistics_Rx - CCK:");
	IL3945_PRINT_RX_STAT("ina_cnt:", cck, ina_cnt);
	IL3945_PRINT_RX_STAT("fina_cnt:", cck, fina_cnt);
	IL3945_PRINT_RX_STAT("plcp_err:", cck, plcp_err);
	IL3945_PRINT_RX_STAT("crc32_err:", cck, crc32_err);
	IL3945_PRINT_RX_STAT("overrun_err:", cck, overrun_err);
	IL3945_PRINT_RX_STAT("early_overrun_err:", cck, early_overrun_err);
	IL3945_PRINT_RX_STAT("crc32_good:", cck, crc32_good);
	IL3945_PRINT_RX_STAT("false_alarm_cnt:", cck, false_alarm_cnt);
	IL3945_PRINT_RX_STAT("fina_sync_err_cnt:", cck, fina_sync_err_cnt);
	IL3945_PRINT_RX_STAT("sfd_timeout:", cck, sfd_timeout);
	IL3945_PRINT_RX_STAT("fina_timeout:", cck, fina_timeout);
	IL3945_PRINT_RX_STAT("unresponded_rts:", cck, unresponded_rts);
	IL3945_PRINT_RX_STAT("rxe_frame_lmt_ovrun:", cck,
			     rxe_frame_limit_overrun);
	IL3945_PRINT_RX_STAT("sent_ack_cnt:", cck, sent_ack_cnt);
	IL3945_PRINT_RX_STAT("sent_cts_cnt:", cck, sent_cts_cnt);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "%-32s current"
			 "acumulative delta max\n",
			 "Statistics_Rx - GENERAL:");
	IL3945_PRINT_RX_STAT("bogus_cts:", general, bogus_cts);
	IL3945_PRINT_RX_STAT("bogus_ack:", general, bogus_ack);
	IL3945_PRINT_RX_STAT("non_bssid_frames:", general, non_bssid_frames);
	IL3945_PRINT_RX_STAT("filtered_frames:", general, filtered_frames);
	IL3945_PRINT_RX_STAT("non_channel_beacons:", general,
			     non_channel_beacons);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
#undef IL3945_PRINT_RX_STAT
/*
 * debugfs read handler: dump the 3945 uCode Tx statistics.
 *
 * Each counter is printed as four columns: the value from the most
 * recent statistics notification, the accumulated total, the delta
 * since the previous notification, and the largest delta observed.
 *
 * Returns bytes copied to user space, -EAGAIN if the device is not
 * alive, or -ENOMEM if the temporary buffer cannot be allocated.
 */
ssize_t
il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	/* Generous worst case: room for every counter line plus headers. */
	int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
	ssize_t ret;
	struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
	if (!il_is_alive(il))
		return -EAGAIN;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}
	/*
	 * The statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	tx = &il->_3945.stats.tx;
	accum_tx = &il->_3945.accum_stats.tx;
	delta_tx = &il->_3945.delta_stats.tx;
	max_tx = &il->_3945.max_delta.tx;
	/* Flags line first, then the column header. */
	pos += il3945_stats_flag(il, buf, bufsz);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      "%-32s current"
		      "acumulative delta max\n",
		      "Statistics_Tx:");
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "preamble:",
		      le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
		      delta_tx->preamble_cnt, max_tx->preamble_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
		      le32_to_cpu(tx->rx_detected_cnt),
		      accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
		      max_tx->rx_detected_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
		      le32_to_cpu(tx->bt_prio_defer_cnt),
		      accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
		      max_tx->bt_prio_defer_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
		      le32_to_cpu(tx->bt_prio_kill_cnt),
		      accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
		      max_tx->bt_prio_kill_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
		      le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
		      delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
		      le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
		      delta_tx->cts_timeout, max_tx->cts_timeout);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
		      le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
		      delta_tx->ack_timeout, max_tx->ack_timeout);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
		      le32_to_cpu(tx->expected_ack_cnt),
		      accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
		      max_tx->expected_ack_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
		      le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
		      delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
/*
 * debugfs read handler: dump the 3945 uCode general statistics
 * (debug counters, power timing, and antenna diversity counters).
 *
 * Same four-column layout as the Rx/Tx handlers: current value,
 * accumulated total, delta since last notification, and max delta.
 *
 * Returns bytes copied to user space, -EAGAIN if the device is not
 * alive, or -ENOMEM on buffer allocation failure.
 */
ssize_t
il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
	ssize_t ret;
	struct iwl39_stats_general *general, *accum_general;
	struct iwl39_stats_general *delta_general, *max_general;
	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
	if (!il_is_alive(il))
		return -EAGAIN;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}
	/*
	 * The statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	general = &il->_3945.stats.general;
	dbg = &il->_3945.stats.general.dbg;
	div = &il->_3945.stats.general.div;
	accum_general = &il->_3945.accum_stats.general;
	delta_general = &il->_3945.delta_stats.general;
	max_general = &il->_3945.max_delta.general;
	accum_dbg = &il->_3945.accum_stats.general.dbg;
	delta_dbg = &il->_3945.delta_stats.general.dbg;
	max_dbg = &il->_3945.max_delta.general.dbg;
	accum_div = &il->_3945.accum_stats.general.div;
	delta_div = &il->_3945.delta_stats.general.div;
	max_div = &il->_3945.max_delta.general.div;
	pos += il3945_stats_flag(il, buf, bufsz);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      "%-32s current"
		      "acumulative delta max\n",
		      "Statistics_General:");
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "burst_check:",
		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
		      delta_dbg->burst_check, max_dbg->burst_check);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "burst_count:",
		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
		      delta_dbg->burst_count, max_dbg->burst_count);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "sleep_time:",
		      le32_to_cpu(general->sleep_time),
		      accum_general->sleep_time, delta_general->sleep_time,
		      max_general->sleep_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "slots_out:",
		      le32_to_cpu(general->slots_out), accum_general->slots_out,
		      delta_general->slots_out, max_general->slots_out);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "slots_idle:",
		      le32_to_cpu(general->slots_idle),
		      accum_general->slots_idle, delta_general->slots_idle,
		      max_general->slots_idle);
	/* ttl_timestamp is a single snapshot value, not a 4-column counter. */
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
		      le32_to_cpu(general->ttl_timestamp));
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
		      delta_div->tx_on_a, max_div->tx_on_a);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
		      delta_div->tx_on_b, max_div->tx_on_b);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "exec_time:",
		      le32_to_cpu(div->exec_time), accum_div->exec_time,
		      delta_div->exec_time, max_div->exec_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      " %-30s %10u %10u %10u %10u\n", "probe_time:",
		      le32_to_cpu(div->probe_time), accum_div->probe_time,
		      delta_div->probe_time, max_div->probe_time);
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
/* 3945-specific hooks plugged into the shared iwlegacy debugfs layer. */
const struct il_debugfs_ops il3945_debugfs_ops = {
	.rx_stats_read = il3945_ucode_rx_stats_read,
	.tx_stats_read = il3945_ucode_tx_stats_read,
	.general_stats_read = il3945_ucode_general_stats_read,
};
| gpl-2.0 |
oppo-source/Find7-5.0-kernel-source | fs/notify/inotify/inotify_fsnotify.c | 7798 | 6631 | /*
* fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
* inotify was largely rewriten to make use of the fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>
#include "inotify.h"
/*
 * Decide whether two fsnotify events carry the same information and can
 * therefore be merged on the notification queue.  Private data is not
 * compared; at this moment that is not a problem for any known
 * fsnotify listener.
 */
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
	/* Events whose headers differ can never be merged. */
	if (old->mask != new->mask)
		return false;
	if (old->to_tell != new->to_tell)
		return false;
	if (old->data_type != new->data_type)
		return false;
	if (old->name_len != new->name_len)
		return false;

	switch (old->data_type) {
	case FSNOTIFY_EVENT_INODE:
		/* once old is on the wait_q its inode may no longer be
		 * touched, so the attached file name is all we can check */
		return !old->name_len ||
		       !strcmp(old->file_name, new->file_name);
	case FSNOTIFY_EVENT_PATH:
		return (old->path.mnt == new->path.mnt) &&
		       (old->path.dentry == new->path.dentry);
	case FSNOTIFY_EVENT_NONE:
		/* overflow events always coalesce; explicit ignores never do */
		if (old->mask & FS_Q_OVERFLOW)
			return true;
		if (old->mask & FS_IN_IGNORED)
			return false;
		return true;
	}
	return false;
}
/*
 * Try to merge @event with the last event already queued on @list.
 *
 * Returns the last queued event with an extra reference taken if it
 * matches (the caller drops the new event), or NULL if no merge is
 * possible.  The caller must already hold the list's lock.
 */
static struct fsnotify_event *inotify_merge(struct list_head *list,
					    struct fsnotify_event *event)
{
	struct fsnotify_event_holder *last_holder;
	struct fsnotify_event *last_event;
	/* and the list better be locked by something too */
	spin_lock(&event->lock);
	last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
	last_event = last_holder->event;
	if (event_compare(last_event, event))
		fsnotify_get_event(last_event);
	else
		last_event = NULL;
	spin_unlock(&event->lock);
	return last_event;
}
/*
 * Deliver one fsnotify event to an inotify group: attach the watch
 * descriptor as private data, queue the event (possibly merging with
 * the previous one), and tear down one-shot watches afterwards.
 *
 * Returns 0 on success or a negative errno.  inotify never sets
 * vfsmount marks, hence the BUG_ON.
 */
static int inotify_handle_event(struct fsnotify_group *group,
				struct fsnotify_mark *inode_mark,
				struct fsnotify_mark *vfsmount_mark,
				struct fsnotify_event *event)
{
	struct inotify_inode_mark *i_mark;
	struct inode *to_tell;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	struct fsnotify_event *added_event;
	int wd, ret = 0;
	BUG_ON(vfsmount_mark);
	pr_debug("%s: group=%p event=%p to_tell=%p mask=%x\n", __func__, group,
		 event, event->to_tell, event->mask);
	to_tell = event->to_tell;
	i_mark = container_of(inode_mark, struct inotify_inode_mark,
			      fsn_mark);
	wd = i_mark->wd;
	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv))
		return -ENOMEM;
	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
	fsn_event_priv->group = group;
	event_priv->wd = wd;
	added_event = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge);
	/* A non-NULL return means our event was not queued: either it was
	 * merged into an existing one (drop the spare reference) or the
	 * add failed (propagate the error).  Our private data is unused
	 * in both cases. */
	if (added_event) {
		inotify_free_event_priv(fsn_event_priv);
		if (!IS_ERR(added_event))
			fsnotify_put_event(added_event);
		else
			ret = PTR_ERR(added_event);
	}
	/* IN_ONESHOT watches fire exactly once, then remove themselves. */
	if (inode_mark->mask & IN_ONESHOT)
		fsnotify_destroy_mark(inode_mark);
	return ret;
}
/* fsnotify callback: a mark is being freed — emit IN_IGNORED to the
 * listener and release the mark's watch-descriptor idr slot. */
static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(fsn_mark, group);
}
/*
 * fsnotify callback: decide whether an event should be delivered to
 * this mark.  The only suppression inotify implements is
 * FS_EXCL_UNLINK, which drops path events for already-unlinked files.
 */
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
				      struct fsnotify_mark *inode_mark,
				      struct fsnotify_mark *vfsmount_mark,
				      __u32 mask, void *data, int data_type)
{
	struct path *path;

	if (!(inode_mark->mask & FS_EXCL_UNLINK))
		return true;
	if (data_type != FSNOTIFY_EVENT_PATH)
		return true;

	path = data;
	return !d_unlinked(path->dentry);
}
/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	/* warn only once per boot; later leaks would just repeat the noise */
	static bool warned = false;
	if (warned)
		return 0;
	warned = true;
	fsn_mark = p;
	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
		"idr. Probably leaking memory\n", id, p, data);
	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it. This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (fsn_mark)
		printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
		       fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
	return 0;
}
/* fsnotify callback: release the per-group inotify state — the wd idr,
 * the per-user instance count, and the user reference. */
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_remove_all(&group->inotify_data.idr);
	idr_destroy(&group->inotify_data.idr);
	atomic_dec(&group->inotify_data.user->inotify_devs);
	free_uid(group->inotify_data.user);
}
/* Return the inotify private record embedded around @fsn_event_priv to
 * its slab cache. */
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
{
	kmem_cache_free(event_priv_cachep,
			container_of(fsn_event_priv,
				     struct inotify_event_private_data,
				     fsnotify_event_priv_data));
}
/* inotify's implementation of the fsnotify backend operations. */
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.should_send_event = inotify_should_send_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event_priv = inotify_free_event_priv,
	.freeing_mark = inotify_freeing_mark,
};
| gpl-2.0 |
DutchDanny/pyramidLE-ICS | fs/ocfs2/cluster/masklog.c | 11126 | 3894 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <asm/uaccess.h>
#include "masklog.h"
/* Bitmask of message classes explicitly allowed to log ("allow"). */
struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
EXPORT_SYMBOL_GPL(mlog_and_bits);
/* Bitmask of message classes explicitly denied from logging ("deny"). */
struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0);
EXPORT_SYMBOL_GPL(mlog_not_bits);
/* Format the state of one log mask bit ("allow", "deny" or "off")
 * into @buf for sysfs. Returns the number of bytes written. */
static ssize_t mlog_mask_show(u64 mask, char *buf)
{
	const char *state = "off";

	if (__mlog_test_u64(mask, mlog_and_bits))
		state = "allow";
	else if (__mlog_test_u64(mask, mlog_not_bits))
		state = "deny";

	return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
/* Parse a sysfs write of "allow", "deny" or "off" (case-insensitive)
 * and update the allow/deny masks accordingly.  Returns @count on
 * success or -EINVAL for unrecognized input. */
static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
{
	if (!strnicmp(buf, "allow", 5)) {
		__mlog_set_u64(mask, mlog_and_bits);
		__mlog_clear_u64(mask, mlog_not_bits);
		return count;
	}
	if (!strnicmp(buf, "deny", 4)) {
		__mlog_set_u64(mask, mlog_not_bits);
		__mlog_clear_u64(mask, mlog_and_bits);
		return count;
	}
	if (!strnicmp(buf, "off", 3)) {
		__mlog_clear_u64(mask, mlog_not_bits);
		__mlog_clear_u64(mask, mlog_and_bits);
		return count;
	}
	return -EINVAL;
}
/* One sysfs attribute per log mask bit. */
struct mlog_attribute {
	struct attribute attr;
	u64 mask;
};

/* Recover the mlog_attribute from the embedded sysfs attribute. */
#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)

/* Initializer for one mlog_attrs[] entry: the attribute file is named
 * after the ML_* flag and is world-readable, root-writable. */
#define define_mask(_name) { \
	.attr = { \
		.name = #_name, \
		.mode = S_IRUGO | S_IWUSR, \
	}, \
	.mask = ML_##_name, \
}
/* Table of every exported log mask bit; unused tail entries stay
 * zeroed (mode 0) and mark the end of the populated range. */
static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
	define_mask(TCP),
	define_mask(MSG),
	define_mask(SOCKET),
	define_mask(HEARTBEAT),
	define_mask(HB_BIO),
	define_mask(DLMFS),
	define_mask(DLM),
	define_mask(DLM_DOMAIN),
	define_mask(DLM_THREAD),
	define_mask(DLM_MASTER),
	define_mask(DLM_RECOVERY),
	define_mask(DLM_GLUE),
	define_mask(VOTE),
	define_mask(CONN),
	define_mask(QUORUM),
	define_mask(BASTS),
	define_mask(CLUSTER),
	define_mask(ERROR),
	define_mask(NOTICE),
	define_mask(KTHREAD),
};

/* NULL-terminated pointer table handed to the kobj_type; filled in by
 * mlog_sys_init(). */
static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
char *buf)
{
struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
return mlog_mask_show(mlog_attr->mask, buf);
}
/* sysfs store hook: dispatch to mlog_mask_store() for this attribute's bit. */
static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
			  const char *buf, size_t count)
{
	return mlog_mask_store(to_mlog_attr(attr)->mask, buf, count);
}
/* sysfs plumbing: ops, kobj type, and the "logmask" kset itself. */
static const struct sysfs_ops mlog_attr_ops = {
	.show = mlog_show,
	.store = mlog_store,
};
static struct kobj_type mlog_ktype = {
	.default_attrs = mlog_attr_ptrs,
	.sysfs_ops = &mlog_attr_ops,
};
static struct kset mlog_kset = {
	.kobj = {.ktype = &mlog_ktype},
};
/* Register the "logmask" kset under @o2cb_kset, exposing one sysfs
 * file per populated entry of mlog_attrs[].  Returns 0 or a negative
 * errno from kset_register(). */
int mlog_sys_init(struct kset *o2cb_kset)
{
	int i;

	/* Build the NULL-terminated attribute pointer table; unset
	 * mlog_attrs[] entries have a zero mode and end the scan. */
	for (i = 0; mlog_attrs[i].attr.mode; i++)
		mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
	mlog_attr_ptrs[i] = NULL;

	kobject_set_name(&mlog_kset.kobj, "logmask");
	mlog_kset.kobj.kset = o2cb_kset;
	return kset_register(&mlog_kset);
}
/* Tear down the "logmask" kset registered by mlog_sys_init(). */
void mlog_sys_shutdown(void)
{
	kset_unregister(&mlog_kset);
}
| gpl-2.0 |
GaloisInc/linux-deadline | samples/trace_events/trace-events-sample.c | 13174 | 1053 | #include <linux/module.h>
#include <linux/kthread.h>
/*
* Any file that uses trace points, must include the header.
* But only one file, must include the header by defining
* CREATE_TRACE_POINTS first. This will make the C code that
* creates the handles for the trace points.
*/
#define CREATE_TRACE_POINTS
#include "trace-events-sample.h"
/* One iteration of the sample thread: sleep for roughly one second,
 * then fire the foo_bar trace event with the current counter. */
static void simple_thread_func(int cnt)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ);
	trace_foo_bar("hello", cnt);
}
/* Kthread body: emit one sample trace event per second until asked to
 * stop via kthread_stop(). */
static int simple_thread(void *arg)
{
	int cnt;

	for (cnt = 0; !kthread_should_stop(); cnt++)
		simple_thread_func(cnt);
	return 0;
}
/* Handle of the sampling kthread; stopped at module exit. */
static struct task_struct *simple_tsk;
/* Module init: spawn the sampling kthread.
 *
 * On failure, propagate the real error code from kthread_run() via
 * PTR_ERR() instead of a bare -1 (which would alias -EPERM and hide
 * the actual cause, e.g. -ENOMEM or -EINTR). */
static int __init trace_event_init(void)
{
	simple_tsk = kthread_run(simple_thread, NULL, "event-sample");
	if (IS_ERR(simple_tsk))
		return PTR_ERR(simple_tsk);
	return 0;
}
/* Module exit: stop the sampling kthread and wait for it to finish. */
static void __exit trace_event_exit(void)
{
	kthread_stop(simple_tsk);
}
/* Standard module registration and metadata. */
module_init(trace_event_init);
module_exit(trace_event_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("trace-events-sample");
MODULE_LICENSE("GPL");
| gpl-2.0 |
lostdj/Jaklin-OpenJFX | modules/web/src/main/native/Source/WebKit/gtk/webkit/webkitapplicationcache.cpp | 119 | 2627 | /*
* Copyright (C) 2009 Jan Michael Alonzo <jmalonzo@gmail.com>
* Copyright (C) 2011 Lukasz Slachciak
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "webkitapplicationcache.h"
#include "ApplicationCacheStorage.h"
#include "FileSystem.h"
#include <wtf/text/CString.h>
// web application cache maximum storage size
static unsigned long long cacheMaxSize = UINT_MAX;
/**
 * webkit_application_cache_get_maximum_size:
 *
 * Returns the maximum size of the cache storage.
 * By default it is set to UINT_MAX i.e. no quota.
 *
 * Returns: the current application cache maximum storage size
 *
 * Since: 1.3.13
 **/
unsigned long long webkit_application_cache_get_maximum_size()
{
    // Refresh the cached copy from WebCore's storage before reporting it.
    cacheMaxSize = WebCore::cacheStorage().maximumSize();
    return cacheMaxSize;
}
/**
 * webkit_application_cache_set_maximum_size:
 * @size: the new web application cache maximum storage size
 *
 * Sets new application cache maximum storage size.
 * Changing the application cache storage size will clear the cache
 * and rebuild cache storage.
 *
 * Since: 1.3.13
 **/
void webkit_application_cache_set_maximum_size(unsigned long long size)
{
    if (size == cacheMaxSize)
        return;

    // A quota change invalidates the existing cache: empty it, compact
    // the database file, then apply and remember the new limit.
    WebCore::cacheStorage().empty();
    WebCore::cacheStorage().vacuumDatabaseFile();
    WebCore::cacheStorage().setMaximumSize(size);
    cacheMaxSize = size;
}
/**
 * webkit_application_cache_get_database_directory_path:
 *
 * Returns the path to the directory WebKit will write web application
 * cache databases to. By default this path is set to
 * $XDG_CACHE_HOME/webkitgtk/applications and cannot be modified.
 *
 * Returns: the application cache database directory path
 *
 * Since: 1.3.13
 **/
const gchar* webkit_application_cache_get_database_directory_path()
{
    // The buffer returned to the caller must outlive this call.  The
    // previous code returned data() of a function-local CString, whose
    // backing storage is released when the local is destroyed at return,
    // handing the caller a dangling pointer.  The directory is fixed for
    // the lifetime of the process, so computing it once into a
    // function-local static is safe and fixes the lifetime bug.
    static CString path = WebCore::fileSystemRepresentation(WebCore::cacheStorage().cacheDirectory());
    return path.data();
}
| gpl-2.0 |
mynew5/tc | dep/recastnavigation/Detour/Source/DetourNavMesh.cpp | 119 | 42192 | //
// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdio.h>
#include "DetourNavMesh.h"
#include "DetourNode.h"
#include "DetourCommon.h"
#include "DetourAlloc.h"
#include "DetourAssert.h"
#include <new>
// Tests whether two 2D edge "slabs" (projected portal edges, stored as
// (coord, height) min/max pairs) overlap closely enough to be linked.
// px shrinks the horizontal extent so slabs that merely touch at their
// end points are not considered connected; py is the vertical
// tolerance (walkable climb).
inline bool overlapSlabs(const float* amin, const float* amax,
						 const float* bmin, const float* bmax,
						 const float px, const float py)
{
	// Check for horizontal overlap.
	// The segment is shrunken a little so that slabs which touch
	// at end points are not connected.
	const float minx = dtMax(amin[0]+px,bmin[0]+px);
	const float maxx = dtMin(amax[0]-px,bmax[0]-px);
	if (minx > maxx)
		return false;
	// Check vertical overlap.
	// Treat each slab as a line segment y = d*x + k and evaluate both
	// heights at the ends of the shared horizontal interval.
	const float ad = (amax[1]-amin[1]) / (amax[0]-amin[0]);
	const float ak = amin[1] - ad*amin[0];
	const float bd = (bmax[1]-bmin[1]) / (bmax[0]-bmin[0]);
	const float bk = bmin[1] - bd*bmin[0];
	const float aminy = ad*minx + ak;
	const float amaxy = ad*maxx + ak;
	const float bminy = bd*minx + bk;
	const float bmaxy = bd*maxx + bk;
	const float dmin = bminy - aminy;
	const float dmax = bmaxy - amaxy;
	// Crossing segments always overlap.
	if (dmin*dmax < 0)
		return true;
	// Check for overlap at endpoints.
	const float thr = dtSqr(py*2);
	if (dmin*dmin <= thr || dmax*dmax <= thr)
		return true;
	return false;
}
// Returns the coordinate of vertex va along the axis perpendicular to
// the given tile side: x for the east/west portals (0, 4), z for the
// north/south portals (2, 6), and 0 for any other side.
static float getSlabCoord(const float* va, const int side)
{
	switch (side)
	{
	case 0:
	case 4:
		return va[0];
	case 2:
	case 6:
		return va[2];
	default:
		return 0;
	}
}
// Projects the edge (va, vb) onto the axis matching the given tile
// side and writes its end points as (coord, height) pairs into
// bmin/bmax, ordered so that bmin holds the end with the smaller
// coordinate.  Sides 0/4 project onto z, sides 2/6 onto x; other
// sides leave the outputs untouched.
static void calcSlabEndPoints(const float* va, const float* vb, float* bmin, float* bmax, const int side)
{
	int axis;
	if (side == 0 || side == 4)
		axis = 2;
	else if (side == 2 || side == 6)
		axis = 0;
	else
		return;

	// Order the two vertices by their coordinate along the slab axis.
	const float* lo = (va[axis] < vb[axis]) ? va : vb;
	const float* hi = (lo == va) ? vb : va;
	bmin[0] = lo[axis];
	bmin[1] = lo[1];
	bmax[0] = hi[axis];
	bmax[1] = hi[1];
}
// Hashes tile coordinates into a position-lookup bucket index by
// mixing them with two large, arbitrarily chosen multiplicative prime
// constants and folding the result into the table mask.
inline int computeTileHash(int x, int y, const int mask)
{
	const unsigned int prime1 = 0x8da6b343;
	const unsigned int prime2 = 0xd8163841;
	const unsigned int hash = prime1 * (unsigned int)x + prime2 * (unsigned int)y;
	return (int)(hash & (unsigned int)mask);
}
// Pops one link index off the tile's free list.  Returns DT_NULL_LINK
// when the pool is exhausted.
inline unsigned int allocLink(dtMeshTile* tile)
{
	const unsigned int link = tile->linksFreeList;
	if (link == DT_NULL_LINK)
		return DT_NULL_LINK;
	tile->linksFreeList = tile->links[link].next;
	return link;
}
// Pushes a link index back onto the tile's free list.
inline void freeLink(dtMeshTile* tile, unsigned int link)
{
	tile->links[link].next = tile->linksFreeList;
	tile->linksFreeList = link;
}
// Allocates a dtNavMesh through the Detour allocator and constructs it
// in place (placement new).  Returns 0 on allocation failure.
dtNavMesh* dtAllocNavMesh()
{
	void* mem = dtAlloc(sizeof(dtNavMesh), DT_ALLOC_PERM);
	if (!mem) return 0;
	return new(mem) dtNavMesh;
}
/// @par
///
/// This function will only free the memory for tiles with the #DT_TILE_FREE_DATA
/// flag set.
// Destroys and frees a navmesh previously created with dtAllocNavMesh();
// safe to call with a null pointer.
void dtFreeNavMesh(dtNavMesh* navmesh)
{
	if (!navmesh) return;
	navmesh->~dtNavMesh();
	dtFree(navmesh);
}
//////////////////////////////////////////////////////////////////////////////////////////
/**
@class dtNavMesh
The navigation mesh consists of one or more tiles defining three primary types of structural data:
A polygon mesh which defines most of the navigation graph. (See rcPolyMesh for its structure.)
A detail mesh used for determining surface height on the polygon mesh. (See rcPolyMeshDetail for its structure.)
Off-mesh connections, which define custom point-to-point edges within the navigation graph.
The general build process is as follows:
-# Create rcPolyMesh and rcPolyMeshDetail data using the Recast build pipeline.
-# Optionally, create off-mesh connection data.
-# Combine the source data into a dtNavMeshCreateParams structure.
-# Create a tile data array using dtCreateNavMeshData().
-# Allocate a dtNavMesh object and initialize it. (For single tile navigation meshes,
the tile data is loaded during this step.)
-# For multi-tile navigation meshes, load the tile data using dtNavMesh::addTile().
Notes:
- This class is usually used in conjunction with the dtNavMeshQuery class for pathfinding.
- Technically, all navigation meshes are tiled. A 'solo' mesh is simply a navigation mesh initialized
to have only a single tile.
- This class does not implement any asynchronous methods. So the ::dtStatus result of all methods will
always contain either a success or failure flag.
@see dtNavMeshQuery, dtCreateNavMeshData, dtNavMeshCreateParams, #dtAllocNavMesh, #dtFreeNavMesh
*/
// Default-constructs an empty navmesh: all counters, lookup tables and
// bit-field widths zeroed; init() must be called before use.
dtNavMesh::dtNavMesh() :
	m_tileWidth(0),
	m_tileHeight(0),
	m_maxTiles(0),
	m_tileLutSize(0),
	m_tileLutMask(0),
	m_posLookup(0),
	m_nextFree(0),
	m_tiles(0),
	m_saltBits(0),
	m_tileBits(0),
	m_polyBits(0)
{
	memset(&m_params, 0, sizeof(dtNavMeshParams));
	m_orig[0] = 0;
	m_orig[1] = 0;
	m_orig[2] = 0;
}
// Releases tile data owned by the mesh (only tiles added with the
// DT_TILE_FREE_DATA flag) plus the tile array and position lookup table.
dtNavMesh::~dtNavMesh()
{
	for (int i = 0; i < m_maxTiles; ++i)
	{
		if (m_tiles[i].flags & DT_TILE_FREE_DATA)
		{
			dtFree(m_tiles[i].data);
			m_tiles[i].data = 0;
			m_tiles[i].dataSize = 0;
		}
	}
	dtFree(m_posLookup);
	dtFree(m_tiles);
}
// Initializes the navmesh for tiled use: stores the layout parameters,
// allocates the tile pool and the hash-based position lookup table,
// and threads every tile onto the free list.
dtStatus dtNavMesh::init(const dtNavMeshParams* params)
{
	memcpy(&m_params, params, sizeof(dtNavMeshParams));
	dtVcopy(m_orig, params->orig);
	m_tileWidth = params->tileWidth;
	m_tileHeight = params->tileHeight;
	// Init tiles
	m_maxTiles = params->maxTiles;
	// Lookup table size: next power of two of maxTiles/4, so the hash
	// mask below is valid.
	m_tileLutSize = dtNextPow2(params->maxTiles/4);
	if (!m_tileLutSize) m_tileLutSize = 1;
	m_tileLutMask = m_tileLutSize-1;
	m_tiles = (dtMeshTile*)dtAlloc(sizeof(dtMeshTile)*m_maxTiles, DT_ALLOC_PERM);
	if (!m_tiles)
		return DT_FAILURE | DT_OUT_OF_MEMORY;
	m_posLookup = (dtMeshTile**)dtAlloc(sizeof(dtMeshTile*)*m_tileLutSize, DT_ALLOC_PERM);
	if (!m_posLookup)
		return DT_FAILURE | DT_OUT_OF_MEMORY;
	memset(m_tiles, 0, sizeof(dtMeshTile)*m_maxTiles);
	memset(m_posLookup, 0, sizeof(dtMeshTile*)*m_tileLutSize);
	// Build the free list back-to-front so tile 0 is handed out first.
	m_nextFree = 0;
	for (int i = m_maxTiles-1; i >= 0; --i)
	{
		m_tiles[i].salt = 1;
		m_tiles[i].next = m_nextFree;
		m_nextFree = &m_tiles[i];
	}
	// Edited by TC
	// Project-specific change: use fixed polyref bit widths instead of
	// deriving them from maxTiles/maxPolys.
	m_tileBits = STATIC_TILE_BITS;
	m_polyBits = STATIC_POLY_BITS;
	m_saltBits = STATIC_SALT_BITS;
	return DT_SUCCESS;
}
// Initializes a single-tile ("solo") navmesh directly from serialized
// tile data: validates the header magic/version, derives the mesh
// parameters from the tile header, then adds the tile.
dtStatus dtNavMesh::init(unsigned char* data, const int dataSize, const int flags)
{
	// Make sure the data is in right format.
	dtMeshHeader* header = (dtMeshHeader*)data;
	if (header->magic != DT_NAVMESH_MAGIC)
		return DT_FAILURE | DT_WRONG_MAGIC;
	if (header->version != DT_NAVMESH_VERSION)
		return DT_FAILURE | DT_WRONG_VERSION;
	dtNavMeshParams params;
	dtVcopy(params.orig, header->bmin);
	params.tileWidth = header->bmax[0] - header->bmin[0];
	params.tileHeight = header->bmax[2] - header->bmin[2];
	params.maxTiles = 1;
	params.maxPolys = header->polyCount;
	dtStatus status = init(&params);
	if (dtStatusFailed(status))
		return status;
	return addTile(data, dataSize, flags, 0, 0);
}
/// @par
///
/// @note The parameters are created automatically when the single tile
/// initialization is performed.
// Accessor for the initialization parameters of this mesh.
const dtNavMeshParams* dtNavMesh::getParams() const
{
	return &m_params;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Finds polygons in the neighbour tile whose border edges on the given
// side overlap the edge (va, vb).  Up to maxcon matches are written to
// con (poly refs) and conarea (shared interval per match).  Returns
// the number of connections found.
int dtNavMesh::findConnectingPolys(const float* va, const float* vb,
								   const dtMeshTile* tile, int side,
								   dtPolyRef* con, float* conarea, int maxcon) const
{
	if (!tile) return 0;
	float amin[2], amax[2];
	calcSlabEndPoints(va,vb, amin,amax, side);
	const float apos = getSlabCoord(va, side);
	// Iterate the candidate tile's polygons looking for border edges
	// facing this side that overlap our edge's slab.
	float bmin[2], bmax[2];
	unsigned short m = DT_EXT_LINK | (unsigned short)side;
	int n = 0;
	dtPolyRef base = getPolyRefBase(tile);
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		dtPoly* poly = &tile->polys[i];
		const int nv = poly->vertCount;
		for (int j = 0; j < nv; ++j)
		{
			// Skip edges which do not point to the right side.
			if (poly->neis[j] != m) continue;
			const float* vc = &tile->verts[poly->verts[j]*3];
			const float* vd = &tile->verts[poly->verts[(j+1) % nv]*3];
			const float bpos = getSlabCoord(vc, side);
			// Segments are not close enough.
			if (dtAbs(apos-bpos) > 0.01f)
				continue;
			// Check if the segments touch.
			calcSlabEndPoints(vc,vd, bmin,bmax, side);
			if (!overlapSlabs(amin,amax, bmin,bmax, 0.01f, tile->header->walkableClimb)) continue;
			// Add return value.
			if (n < maxcon)
			{
				// Record the overlapping interval and the poly ref.
				conarea[n*2+0] = dtMax(amin[0], bmin[0]);
				conarea[n*2+1] = dtMin(amax[0], bmax[0]);
				con[n] = base | (dtPolyRef)i;
				n++;
			}
			break;
		}
	}
	return n;
}
// Removes all external links in 'tile' that point at polygons of
// 'target' (used when the target tile is removed or replaced).
void dtNavMesh::unconnectExtLinks(dtMeshTile* tile, dtMeshTile* target)
{
	if (!tile || !target) return;
	const unsigned int targetNum = decodePolyIdTile(getTileRef(target));
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		dtPoly* poly = &tile->polys[i];
		// Walk the singly linked link list, keeping a trailing pointer
		// (pj) so matched entries can be unlinked in place.
		unsigned int j = poly->firstLink;
		unsigned int pj = DT_NULL_LINK;
		while (j != DT_NULL_LINK)
		{
			if (tile->links[j].side != 0xff &&
				decodePolyIdTile(tile->links[j].ref) == targetNum)
			{
				// Remove link.
				unsigned int nj = tile->links[j].next;
				if (pj == DT_NULL_LINK)
					poly->firstLink = nj;
				else
					tile->links[pj].next = nj;
				freeLink(tile, j);
				j = nj;
			}
			else
			{
				// Advance
				pj = j;
				j = tile->links[j].next;
			}
		}
	}
}
// Creates links from 'tile' to 'target' across their shared border.
// Only portal edges facing 'side' are considered (side == -1 means all
// sides).  Each overlapping neighbour edge found becomes a link whose
// bmin/bmax compress the shared interval to 0..255.
void dtNavMesh::connectExtLinks(dtMeshTile* tile, dtMeshTile* target, int side)
{
	if (!tile) return;
	// Connect border links.
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		dtPoly* poly = &tile->polys[i];
		// Create new links.
		// unsigned short m = DT_EXT_LINK | (unsigned short)side;
		const int nv = poly->vertCount;
		for (int j = 0; j < nv; ++j)
		{
			// Skip non-portal edges.
			if ((poly->neis[j] & DT_EXT_LINK) == 0)
				continue;
			const int dir = (int)(poly->neis[j] & 0xff);
			if (side != -1 && dir != side)
				continue;
			// Create new links
			const float* va = &tile->verts[poly->verts[j]*3];
			const float* vb = &tile->verts[poly->verts[(j+1) % nv]*3];
			dtPolyRef nei[4];
			float neia[4*2];
			int nnei = findConnectingPolys(va,vb, target, dtOppositeTile(dir), nei,neia,4);
			for (int k = 0; k < nnei; ++k)
			{
				unsigned int idx = allocLink(tile);
				if (idx != DT_NULL_LINK)
				{
					dtLink* link = &tile->links[idx];
					link->ref = nei[k];
					link->edge = (unsigned char)j;
					link->side = (unsigned char)dir;
					// Push onto the polygon's link list.
					link->next = poly->firstLink;
					poly->firstLink = idx;
					// Compress portal limits to a byte value.
					if (dir == 0 || dir == 4)
					{
						// East/west portals parameterize along z.
						float tmin = (neia[k*2+0]-va[2]) / (vb[2]-va[2]);
						float tmax = (neia[k*2+1]-va[2]) / (vb[2]-va[2]);
						if (tmin > tmax)
							dtSwap(tmin,tmax);
						link->bmin = (unsigned char)(dtClamp(tmin, 0.0f, 1.0f)*255.0f);
						link->bmax = (unsigned char)(dtClamp(tmax, 0.0f, 1.0f)*255.0f);
					}
					else if (dir == 2 || dir == 6)
					{
						// North/south portals parameterize along x.
						float tmin = (neia[k*2+0]-va[0]) / (vb[0]-va[0]);
						float tmax = (neia[k*2+1]-va[0]) / (vb[0]-va[0]);
						if (tmin > tmax)
							dtSwap(tmin,tmax);
						link->bmin = (unsigned char)(dtClamp(tmin, 0.0f, 1.0f)*255.0f);
						link->bmax = (unsigned char)(dtClamp(tmax, 0.0f, 1.0f)*255.0f);
					}
				}
			}
		}
	}
}
// Connects off-mesh connections that start in 'target' and land in
// 'tile'.  For each landing point a nearby polygon is searched in
// 'tile'; if found, a link from the connection to that polygon is
// created, and for bidirectional connections a back-link as well.
void dtNavMesh::connectExtOffMeshLinks(dtMeshTile* tile, dtMeshTile* target, int side)
{
	if (!tile) return;
	// Connect off-mesh links.
	// We are interested on links which land from target tile to this tile.
	const unsigned char oppositeSide = (side == -1) ? 0xff : (unsigned char)dtOppositeTile(side);
	for (int i = 0; i < target->header->offMeshConCount; ++i)
	{
		dtOffMeshConnection* targetCon = &target->offMeshCons[i];
		if (targetCon->side != oppositeSide)
			continue;
		dtPoly* targetPoly = &target->polys[targetCon->poly];
		// Skip off-mesh connections which start location could not be connected at all.
		if (targetPoly->firstLink == DT_NULL_LINK)
			continue;
		// Search extents around the landing point: connection radius
		// horizontally, walkable climb vertically.
		const float ext[3] = { targetCon->rad, target->header->walkableClimb, targetCon->rad };
		// Find polygon to connect to.
		const float* p = &targetCon->pos[3];
		float nearestPt[3];
		dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt);
		if (!ref)
			continue;
		// findNearestPoly may return too optimistic results, further check to make sure.
		if (dtSqr(nearestPt[0]-p[0])+dtSqr(nearestPt[2]-p[2]) > dtSqr(targetCon->rad))
			continue;
		// Make sure the location is on current mesh.
		float* v = &target->verts[targetPoly->verts[1]*3];
		dtVcopy(v, nearestPt);
		// Link off-mesh connection to target poly.
		unsigned int idx = allocLink(target);
		if (idx != DT_NULL_LINK)
		{
			dtLink* link = &target->links[idx];
			link->ref = ref;
			link->edge = (unsigned char)1;
			link->side = oppositeSide;
			link->bmin = link->bmax = 0;
			// Add to linked list.
			link->next = targetPoly->firstLink;
			targetPoly->firstLink = idx;
		}
		// Link target poly to off-mesh connection.
		if (targetCon->flags & DT_OFFMESH_CON_BIDIR)
		{
			unsigned int tidx = allocLink(tile);
			if (tidx != DT_NULL_LINK)
			{
				const unsigned short landPolyIdx = (unsigned short)decodePolyIdPoly(ref);
				dtPoly* landPoly = &tile->polys[landPolyIdx];
				dtLink* link = &tile->links[tidx];
				link->ref = getPolyRefBase(target) | (dtPolyRef)(targetCon->poly);
				link->edge = 0xff;
				link->side = (unsigned char)(side == -1 ? 0xff : side);
				link->bmin = link->bmax = 0;
				// Add to linked list.
				link->next = landPoly->firstLink;
				landPoly->firstLink = tidx;
			}
		}
	}
}
// Rebuilds the internal (same-tile) neighbour links for every polygon in
// 'tile'. Resets each polygon's link list first; portal (DT_EXT_LINK) and
// off-mesh polygons are handled elsewhere.
void dtNavMesh::connectIntLinks(dtMeshTile* tile)
{
if (!tile) return;
dtPolyRef base = getPolyRefBase(tile);
for (int i = 0; i < tile->header->polyCount; ++i)
{
dtPoly* poly = &tile->polys[i];
poly->firstLink = DT_NULL_LINK;
if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
continue;
// Build edge links backwards so that the links will be
// in the linked list from lowest index to highest.
for (int j = poly->vertCount-1; j >= 0; --j)
{
// Skip hard and non-internal edges.
if (poly->neis[j] == 0 || (poly->neis[j] & DT_EXT_LINK)) continue;
unsigned int idx = allocLink(tile);
if (idx != DT_NULL_LINK)
{
dtLink* link = &tile->links[idx];
// neis[] stores neighbour polygon index + 1; 0 means no neighbour.
link->ref = base | (dtPolyRef)(poly->neis[j]-1);
link->edge = (unsigned char)j;
link->side = 0xff;
link->bmin = link->bmax = 0;
// Add to linked list.
link->next = poly->firstLink;
poly->firstLink = idx;
}
}
}
}
// Connects the start end-point of each off-mesh connection in 'tile' to the
// nearest ground polygon within the same tile, and links that polygon back
// to the connection.
void dtNavMesh::baseOffMeshLinks(dtMeshTile* tile)
{
if (!tile) return;
dtPolyRef base = getPolyRefBase(tile);
// Base off-mesh connection start points.
for (int i = 0; i < tile->header->offMeshConCount; ++i)
{
dtOffMeshConnection* con = &tile->offMeshCons[i];
dtPoly* poly = &tile->polys[con->poly];
// Search box: connection radius horizontally, walkable climb vertically.
const float ext[3] = { con->rad, tile->header->walkableClimb, con->rad };
// Find polygon to connect to.
const float* p = &con->pos[0]; // First vertex
float nearestPt[3];
dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt);
if (!ref) continue;
// findNearestPoly may return too optimistic results, further check to make sure.
if (dtSqr(nearestPt[0]-p[0])+dtSqr(nearestPt[2]-p[2]) > dtSqr(con->rad))
continue;
// Make sure the location is on current mesh.
// Snap the connection's first vertex onto the ground polygon.
float* v = &tile->verts[poly->verts[0]*3];
dtVcopy(v, nearestPt);
// Link off-mesh connection to target poly.
unsigned int idx = allocLink(tile);
if (idx != DT_NULL_LINK)
{
dtLink* link = &tile->links[idx];
link->ref = ref;
link->edge = (unsigned char)0;
link->side = 0xff;
link->bmin = link->bmax = 0;
// Add to linked list.
link->next = poly->firstLink;
poly->firstLink = idx;
}
// Start end-point is always connect back to off-mesh connection.
unsigned int tidx = allocLink(tile);
if (tidx != DT_NULL_LINK)
{
const unsigned short landPolyIdx = (unsigned short)decodePolyIdPoly(ref);
dtPoly* landPoly = &tile->polys[landPolyIdx];
dtLink* link = &tile->links[tidx];
link->ref = base | (dtPolyRef)(con->poly);
link->edge = 0xff;
link->side = 0xff;
link->bmin = link->bmax = 0;
// Add to linked list.
link->next = landPoly->firstLink;
landPoly->firstLink = tidx;
}
}
}
// Finds the closest point on polygon 'ref' to 'pos'. On return 'closest'
// holds the point and, if non-null, *posOverPoly tells whether 'pos' was
// directly above the polygon. The reference is not validated (unsafe lookup).
void dtNavMesh::closestPointOnPoly(dtPolyRef ref, const float* pos, float* closest, bool* posOverPoly) const
{
const dtMeshTile* tile = 0;
const dtPoly* poly = 0;
getTileAndPolyByRefUnsafe(ref, &tile, &poly);
// Off-mesh connections don't have detail polygons.
if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
{
// Project 'pos' onto the 2-vertex connection segment by distance ratio.
const float* v0 = &tile->verts[poly->verts[0]*3];
const float* v1 = &tile->verts[poly->verts[1]*3];
const float d0 = dtVdist(pos, v0);
const float d1 = dtVdist(pos, v1);
const float u = d0 / (d0+d1);
dtVlerp(closest, v0, v1, u);
if (posOverPoly)
*posOverPoly = false;
return;
}
const unsigned int ip = (unsigned int)(poly - tile->polys);
const dtPolyDetail* pd = &tile->detailMeshes[ip];
// Clamp point to be inside the polygon.
float verts[DT_VERTS_PER_POLYGON*3];
float edged[DT_VERTS_PER_POLYGON];
float edget[DT_VERTS_PER_POLYGON];
const int nv = poly->vertCount;
for (int i = 0; i < nv; ++i)
dtVcopy(&verts[i*3], &tile->verts[poly->verts[i]*3]);
dtVcopy(closest, pos);
if (!dtDistancePtPolyEdgesSqr(pos, verts, nv, edged, edget))
{
// Point is outside the polygon, dtClamp to nearest edge.
float dmin = FLT_MAX;
int imin = -1;
for (int i = 0; i < nv; ++i)
{
if (edged[i] < dmin)
{
dmin = edged[i];
imin = i;
}
}
// Interpolate along the nearest edge using the parameter from edget[].
const float* va = &verts[imin*3];
const float* vb = &verts[((imin+1)%nv)*3];
dtVlerp(closest, va, vb, edget[imin]);
if (posOverPoly)
*posOverPoly = false;
}
else
{
if (posOverPoly)
*posOverPoly = true;
}
// Find height at the location.
// Walk the detail triangles and take the height of the first one that
// contains the (x,z) of 'pos'.
for (int j = 0; j < pd->triCount; ++j)
{
const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4];
const float* v[3];
for (int k = 0; k < 3; ++k)
{
// Detail triangle indices below vertCount refer to polygon vertices,
// larger ones to extra detail vertices.
if (t[k] < poly->vertCount)
v[k] = &tile->verts[poly->verts[t[k]]*3];
else
v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3];
}
float h;
if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h))
{
closest[1] = h;
break;
}
}
}
// Returns the polygon in 'tile' nearest to 'center' within the box defined
// by 'extents'; writes the matching surface point into 'nearestPt'.
// Returns 0 when no candidate polygon overlaps the search box.
dtPolyRef dtNavMesh::findNearestPolyInTile(const dtMeshTile* tile,
	const float* center, const float* extents,
	float* nearestPt) const
{
	// Build the axis-aligned query box around the search center.
	float qmin[3], qmax[3];
	dtVsub(qmin, center, extents);
	dtVadd(qmax, center, extents);

	// Collect candidate polygons overlapping the box.
	dtPolyRef candidates[128];
	const int ncandidates = queryPolygonsInTile(tile, qmin, qmax, candidates, 128);

	// Keep the candidate whose closest surface point is nearest to 'center'.
	dtPolyRef best = 0;
	float bestDistSqr = FLT_MAX;
	for (int i = 0; i < ncandidates; ++i)
	{
		const dtPolyRef ref = candidates[i];
		float pt[3];
		bool overPoly = false;
		closestPointOnPoly(ref, center, pt, &overPoly);

		float delta[3];
		dtVsub(delta, center, pt);

		// When the center lies directly over the polygon, only the height
		// difference beyond the walkable climb counts as distance; this
		// favors the polygon underfoot over a closer straight-line match.
		float distSqr;
		if (overPoly)
		{
			const float dy = dtAbs(delta[1]) - tile->header->walkableClimb;
			distSqr = dy > 0 ? dy*dy : 0;
		}
		else
		{
			distSqr = dtVlenSqr(delta);
		}

		if (distSqr < bestDistSqr)
		{
			dtVcopy(nearestPt, pt);
			bestDistSqr = distSqr;
			best = ref;
		}
	}

	return best;
}
// Collects up to maxPolys polygon refs in 'tile' whose bounds overlap the
// world-space box [qmin,qmax]. Uses the tile's BV-tree when present,
// otherwise falls back to a linear scan. Returns the number stored.
int dtNavMesh::queryPolygonsInTile(const dtMeshTile* tile, const float* qmin, const float* qmax,
dtPolyRef* polys, const int maxPolys) const
{
if (tile->bvTree)
{
const dtBVNode* node = &tile->bvTree[0];
const dtBVNode* end = &tile->bvTree[tile->header->bvNodeCount];
const float* tbmin = tile->header->bmin;
const float* tbmax = tile->header->bmax;
const float qfac = tile->header->bvQuantFactor;
// Calculate quantized box
unsigned short bmin[3], bmax[3];
// dtClamp query box to world box.
float minx = dtClamp(qmin[0], tbmin[0], tbmax[0]) - tbmin[0];
float miny = dtClamp(qmin[1], tbmin[1], tbmax[1]) - tbmin[1];
float minz = dtClamp(qmin[2], tbmin[2], tbmax[2]) - tbmin[2];
float maxx = dtClamp(qmax[0], tbmin[0], tbmax[0]) - tbmin[0];
float maxy = dtClamp(qmax[1], tbmin[1], tbmax[1]) - tbmin[1];
float maxz = dtClamp(qmax[2], tbmin[2], tbmax[2]) - tbmin[2];
// Quantize
// Snap min down to even and max up to odd so the quantized box is
// conservative (never smaller than the real box).
bmin[0] = (unsigned short)(qfac * minx) & 0xfffe;
bmin[1] = (unsigned short)(qfac * miny) & 0xfffe;
bmin[2] = (unsigned short)(qfac * minz) & 0xfffe;
bmax[0] = (unsigned short)(qfac * maxx + 1) | 1;
bmax[1] = (unsigned short)(qfac * maxy + 1) | 1;
bmax[2] = (unsigned short)(qfac * maxz + 1) | 1;
// Traverse tree
dtPolyRef base = getPolyRefBase(tile);
int n = 0;
while (node < end)
{
const bool overlap = dtOverlapQuantBounds(bmin, bmax, node->bmin, node->bmax);
// Leaf nodes store a polygon index (>= 0); internal nodes store the
// negated escape offset used to skip their subtree.
const bool isLeafNode = node->i >= 0;
if (isLeafNode && overlap)
{
if (n < maxPolys)
polys[n++] = base | (dtPolyRef)node->i;
}
if (overlap || isLeafNode)
node++;
else
{
const int escapeIndex = -node->i;
node += escapeIndex;
}
}
return n;
}
else
{
// No BV-tree: test every polygon's bounds directly.
float bmin[3], bmax[3];
int n = 0;
dtPolyRef base = getPolyRefBase(tile);
for (int i = 0; i < tile->header->polyCount; ++i)
{
dtPoly* p = &tile->polys[i];
// Do not return off-mesh connection polygons.
if (p->getType() == DT_POLYTYPE_OFFMESH_CONNECTION)
continue;
// Calc polygon bounds.
const float* v = &tile->verts[p->verts[0]*3];
dtVcopy(bmin, v);
dtVcopy(bmax, v);
for (int j = 1; j < p->vertCount; ++j)
{
v = &tile->verts[p->verts[j]*3];
dtVmin(bmin, v);
dtVmax(bmax, v);
}
if (dtOverlapBounds(qmin,qmax, bmin,bmax))
{
if (n < maxPolys)
polys[n++] = base | (dtPolyRef)i;
}
}
return n;
}
}
/// @par
///
/// The add operation will fail if the data is in the wrong format, the allocated tile
/// space is full, or there is a tile already at the specified reference.
///
/// The lastRef parameter is used to restore a tile with the same tile
/// reference it had previously used. In this case the #dtPolyRef's for the
/// tile will be restored to the same values they were before the tile was
/// removed.
///
/// @see dtCreateNavMeshData, #removeTile
// Adds a serialized tile to the navmesh: validates the header, allocates a
// tile slot (optionally the one identified by lastRef), patches the data
// blob's internal pointers, and stitches links inside the tile and to its
// neighbours. See the doxygen comment above for the contract.
// NOTE(review): 'data' is dereferenced without a null check and 'dataSize'
// is not validated against the header-derived layout — callers must pass a
// well-formed blob; confirm whether upstream validation is wanted here.
dtStatus dtNavMesh::addTile(unsigned char* data, int dataSize, int flags,
dtTileRef lastRef, dtTileRef* result)
{
// Make sure the data is in right format.
dtMeshHeader* header = (dtMeshHeader*)data;
if (header->magic != DT_NAVMESH_MAGIC)
return DT_FAILURE | DT_WRONG_MAGIC;
if (header->version != DT_NAVMESH_VERSION)
return DT_FAILURE | DT_WRONG_VERSION;
// Make sure the location is free.
if (getTileAt(header->x, header->y, header->layer))
return DT_FAILURE;
// Allocate a tile.
dtMeshTile* tile = 0;
if (!lastRef)
{
// No preferred slot: take the head of the free list.
if (m_nextFree)
{
tile = m_nextFree;
m_nextFree = tile->next;
tile->next = 0;
}
}
else
{
// Try to relocate the tile to specific index with same salt.
int tileIndex = (int)decodePolyIdTile((dtPolyRef)lastRef);
if (tileIndex >= m_maxTiles)
return DT_FAILURE | DT_OUT_OF_MEMORY;
// Try to find the specific tile id from the free list.
dtMeshTile* target = &m_tiles[tileIndex];
dtMeshTile* prev = 0;
tile = m_nextFree;
while (tile && tile != target)
{
prev = tile;
tile = tile->next;
}
// Could not find the correct location.
if (tile != target)
return DT_FAILURE | DT_OUT_OF_MEMORY;
// Remove from freelist
if (!prev)
m_nextFree = tile->next;
else
prev->next = tile->next;
// Restore salt.
// Reusing the old salt keeps previously handed-out dtPolyRefs valid.
tile->salt = decodePolyIdSalt((dtPolyRef)lastRef);
}
// Make sure we could allocate a tile.
if (!tile)
return DT_FAILURE | DT_OUT_OF_MEMORY;
// Insert tile into the position lut.
int h = computeTileHash(header->x, header->y, m_tileLutMask);
tile->next = m_posLookup[h];
m_posLookup[h] = tile;
// Patch header pointers.
// The blob stores all arrays back-to-back after the header, each section
// 4-byte aligned; walk it and point the tile's members at each section.
const int headerSize = dtAlign4(sizeof(dtMeshHeader));
const int vertsSize = dtAlign4(sizeof(float)*3*header->vertCount);
const int polysSize = dtAlign4(sizeof(dtPoly)*header->polyCount);
const int linksSize = dtAlign4(sizeof(dtLink)*(header->maxLinkCount));
const int detailMeshesSize = dtAlign4(sizeof(dtPolyDetail)*header->detailMeshCount);
const int detailVertsSize = dtAlign4(sizeof(float)*3*header->detailVertCount);
const int detailTrisSize = dtAlign4(sizeof(unsigned char)*4*header->detailTriCount);
const int bvtreeSize = dtAlign4(sizeof(dtBVNode)*header->bvNodeCount);
const int offMeshLinksSize = dtAlign4(sizeof(dtOffMeshConnection)*header->offMeshConCount);
unsigned char* d = data + headerSize;
tile->verts = (float*)d; d += vertsSize;
tile->polys = (dtPoly*)d; d += polysSize;
tile->links = (dtLink*)d; d += linksSize;
tile->detailMeshes = (dtPolyDetail*)d; d += detailMeshesSize;
tile->detailVerts = (float*)d; d += detailVertsSize;
tile->detailTris = (unsigned char*)d; d += detailTrisSize;
tile->bvTree = (dtBVNode*)d; d += bvtreeSize;
tile->offMeshCons = (dtOffMeshConnection*)d; d += offMeshLinksSize;
// If there are no items in the bvtree, reset the tree pointer.
if (!bvtreeSize)
tile->bvTree = 0;
// Build links freelist
tile->linksFreeList = 0;
tile->links[header->maxLinkCount-1].next = DT_NULL_LINK;
for (int i = 0; i < header->maxLinkCount-1; ++i)
tile->links[i].next = i+1;
// Init tile.
tile->header = header;
tile->data = data;
tile->dataSize = dataSize;
tile->flags = flags;
connectIntLinks(tile);
baseOffMeshLinks(tile);
// Create connections with neighbour tiles.
static const int MAX_NEIS = 32;
dtMeshTile* neis[MAX_NEIS];
int nneis;
// Connect with layers in current tile.
nneis = getTilesAt(header->x, header->y, neis, MAX_NEIS);
for (int j = 0; j < nneis; ++j)
{
if (neis[j] != tile)
{
connectExtLinks(tile, neis[j], -1);
connectExtLinks(neis[j], tile, -1);
}
connectExtOffMeshLinks(tile, neis[j], -1);
connectExtOffMeshLinks(neis[j], tile, -1);
}
// Connect with neighbour tiles.
// For each of the 8 neighbour directions, stitch both directions.
for (int i = 0; i < 8; ++i)
{
nneis = getNeighbourTilesAt(header->x, header->y, i, neis, MAX_NEIS);
for (int j = 0; j < nneis; ++j)
{
connectExtLinks(tile, neis[j], i);
connectExtLinks(neis[j], tile, dtOppositeTile(i));
connectExtOffMeshLinks(tile, neis[j], i);
connectExtOffMeshLinks(neis[j], tile, dtOppositeTile(i));
}
}
if (result)
*result = getTileRef(tile);
return DT_SUCCESS;
}
// Looks up the tile at grid location (x,y) on the given layer.
// Returns 0 when no such tile is loaded.
const dtMeshTile* dtNavMesh::getTileAt(const int x, const int y, const int layer) const
{
	// Walk the hash bucket for (x,y); entries for different layers and
	// colliding locations share a bucket, so match all three fields.
	const int h = computeTileHash(x, y, m_tileLutMask);
	for (dtMeshTile* t = m_posLookup[h]; t; t = t->next)
	{
		if (t->header &&
			t->header->x == x &&
			t->header->y == y &&
			t->header->layer == layer)
			return t;
	}
	return 0;
}
// Collects the tiles adjacent to (x,y) in the given direction 'side'
// (0..7, starting at +x and stepping through the diagonals). Fills 'tiles'
// up to maxTiles and returns the number found.
int dtNavMesh::getNeighbourTilesAt(const int x, const int y, const int side, dtMeshTile** tiles, const int maxTiles) const
{
	// Per-direction grid offsets; an out-of-range side leaves (x,y)
	// unchanged, matching the original switch with no default case.
	static const int offsetX[8] = { 1, 1, 0, -1, -1, -1, 0, 1 };
	static const int offsetY[8] = { 0, 1, 1,  1,  0, -1, -1, -1 };
	int nx = x, ny = y;
	if (side >= 0 && side < 8)
	{
		nx += offsetX[side];
		ny += offsetY[side];
	}
	return getTilesAt(nx, ny, tiles, maxTiles);
}
int dtNavMesh::getTilesAt(const int x, const int y, dtMeshTile** tiles, const int maxTiles) const
{
int n = 0;
// Find tile based on hash.
int h = computeTileHash(x,y,m_tileLutMask);
dtMeshTile* tile = m_posLookup[h];
while (tile)
{
if (tile->header &&
tile->header->x == x &&
tile->header->y == y)
{
if (n < maxTiles)
tiles[n++] = tile;
}
tile = tile->next;
}
return n;
}
/// @par
///
/// This function will not fail if the tiles array is too small to hold the
/// entire result set. It will simply fill the array to capacity.
int dtNavMesh::getTilesAt(const int x, const int y, dtMeshTile const** tiles, const int maxTiles) const
{
int n = 0;
// Find tile based on hash.
int h = computeTileHash(x,y,m_tileLutMask);
dtMeshTile* tile = m_posLookup[h];
while (tile)
{
if (tile->header &&
tile->header->x == x &&
tile->header->y == y)
{
if (n < maxTiles)
tiles[n++] = tile;
}
tile = tile->next;
}
return n;
}
// Returns the reference of the tile at grid location (x,y) on the given
// layer, or 0 when no such tile is loaded.
dtTileRef dtNavMesh::getTileRefAt(const int x, const int y, const int layer) const
{
	// Same bucket walk as getTileAt, but hand back a reference instead
	// of a pointer.
	const int h = computeTileHash(x, y, m_tileLutMask);
	for (dtMeshTile* t = m_posLookup[h]; t; t = t->next)
	{
		if (t->header &&
			t->header->x == x &&
			t->header->y == y &&
			t->header->layer == layer)
			return getTileRef(t);
	}
	return 0;
}
// Resolves a tile reference to the tile it names. Returns 0 for a null,
// out-of-range, or stale (salt mismatch) reference.
const dtMeshTile* dtNavMesh::getTileByRef(dtTileRef ref) const
{
	if (!ref)
		return 0;
	// Split the reference into array index and salt, then validate both.
	const unsigned int index = decodePolyIdTile((dtPolyRef)ref);
	const unsigned int salt = decodePolyIdSalt((dtPolyRef)ref);
	if ((int)index >= m_maxTiles)
		return 0;
	const dtMeshTile* tile = &m_tiles[index];
	return (tile->salt == salt) ? tile : 0;
}
// Returns the maximum number of tiles this navmesh was initialized for.
int dtNavMesh::getMaxTiles() const
{
return m_maxTiles;
}
// Returns the tile at array index 'i' (mutable). No bounds check is
// performed; the caller must keep 0 <= i < getMaxTiles().
dtMeshTile* dtNavMesh::getTile(int i)
{
return &m_tiles[i];
}
// Const overload of getTile(); same contract, no bounds check.
const dtMeshTile* dtNavMesh::getTile(int i) const
{
return &m_tiles[i];
}
// Converts a world-space position into the (tx,ty) tile grid coordinates
// that contain it.
void dtNavMesh::calcTileLoc(const float* pos, int* tx, int* ty) const
{
	// Translate into the mesh's local frame, then divide by tile size.
	// floorf (rather than a plain cast) keeps negative coordinates in the
	// correct cell.
	const float localX = pos[0] - m_orig[0];
	const float localZ = pos[2] - m_orig[2];
	*tx = (int)floorf(localX / m_tileWidth);
	*ty = (int)floorf(localZ / m_tileHeight);
}
// Resolves a polygon reference to its tile and polygon, validating the
// tile index, salt and polygon index on the way. Outputs are written only
// on success.
dtStatus dtNavMesh::getTileAndPolyByRef(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const
{
	if (!ref) return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	// Reject stale or out-of-range references before dereferencing.
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	const dtMeshTile* t = &m_tiles[it];
	if (t->salt != salt || t->header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (ip >= (unsigned int)t->header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	*tile = t;
	*poly = &t->polys[ip];
	return DT_SUCCESS;
}
/// @par
///
/// @warning Only use this function if it is known that the provided polygon
/// reference is valid. This function is faster than #getTileAndPolyByRef, but
/// it does not validate the reference.
void dtNavMesh::getTileAndPolyByRefUnsafe(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const
{
unsigned int salt, it, ip;
decodePolyId(ref, salt, it, ip);
*tile = &m_tiles[it];
*poly = &m_tiles[it].polys[ip];
}
// Reports whether 'ref' currently names a live polygon: non-null, tile
// index in range, salt current, tile loaded, and polygon index in range.
bool dtNavMesh::isValidPolyRef(dtPolyRef ref) const
{
	if (!ref) return false;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	if (it >= (unsigned int)m_maxTiles)
		return false;
	const dtMeshTile& t = m_tiles[it];
	if (t.salt != salt || t.header == 0)
		return false;
	return ip < (unsigned int)t.header->polyCount;
}
/// @par
///
/// This function returns the data for the tile so that, if desired,
/// it can be added back to the navigation mesh at a later point.
///
/// @see #addTile
// Removes the tile named by 'ref': unhooks it from the position lookup,
// severs links from neighbouring tiles, releases or returns the data blob,
// bumps the salt (invalidating outstanding refs) and returns the slot to
// the free list. See the doxygen comment above for the data-return contract.
dtStatus dtNavMesh::removeTile(dtTileRef ref, unsigned char** data, int* dataSize)
{
if (!ref)
return DT_FAILURE | DT_INVALID_PARAM;
unsigned int tileIndex = decodePolyIdTile((dtPolyRef)ref);
unsigned int tileSalt = decodePolyIdSalt((dtPolyRef)ref);
if ((int)tileIndex >= m_maxTiles)
return DT_FAILURE | DT_INVALID_PARAM;
dtMeshTile* tile = &m_tiles[tileIndex];
// Salt mismatch means the reference is stale (tile already replaced).
if (tile->salt != tileSalt)
return DT_FAILURE | DT_INVALID_PARAM;
// Remove tile from hash lookup.
int h = computeTileHash(tile->header->x,tile->header->y,m_tileLutMask);
dtMeshTile* prev = 0;
dtMeshTile* cur = m_posLookup[h];
while (cur)
{
if (cur == tile)
{
if (prev)
prev->next = cur->next;
else
m_posLookup[h] = cur->next;
break;
}
prev = cur;
cur = cur->next;
}
// Remove connections to neighbour tiles.
// Create connections with neighbour tiles.
static const int MAX_NEIS = 32;
dtMeshTile* neis[MAX_NEIS];
int nneis;
// Connect with layers in current tile.
// Drop links from other layers at this location that point into this tile.
nneis = getTilesAt(tile->header->x, tile->header->y, neis, MAX_NEIS);
for (int j = 0; j < nneis; ++j)
{
if (neis[j] == tile) continue;
unconnectExtLinks(neis[j], tile);
}
// Connect with neighbour tiles.
// Drop links from each of the 8 neighbouring grid locations.
for (int i = 0; i < 8; ++i)
{
nneis = getNeighbourTilesAt(tile->header->x, tile->header->y, i, neis, MAX_NEIS);
for (int j = 0; j < nneis; ++j)
unconnectExtLinks(neis[j], tile);
}
// Reset tile.
if (tile->flags & DT_TILE_FREE_DATA)
{
// Owns data
dtFree(tile->data);
tile->data = 0;
tile->dataSize = 0;
if (data) *data = 0;
if (dataSize) *dataSize = 0;
}
else
{
// Caller owns the blob; hand it back so it can be re-added later.
if (data) *data = tile->data;
if (dataSize) *dataSize = tile->dataSize;
}
tile->header = 0;
tile->flags = 0;
tile->linksFreeList = 0;
tile->polys = 0;
tile->verts = 0;
tile->links = 0;
tile->detailMeshes = 0;
tile->detailVerts = 0;
tile->detailTris = 0;
tile->bvTree = 0;
tile->offMeshCons = 0;
// Update salt, salt should never be zero.
tile->salt = (tile->salt+1) & ((1<<m_saltBits)-1);
if (tile->salt == 0)
tile->salt++;
// Add to free list.
tile->next = m_nextFree;
m_nextFree = tile;
return DT_SUCCESS;
}
// Builds the reference for 'tile' from its salt and its offset within the
// tile array. Returns 0 for a null tile.
dtTileRef dtNavMesh::getTileRef(const dtMeshTile* tile) const
{
	if (tile == 0)
		return 0;
	const unsigned int index = (unsigned int)(tile - m_tiles);
	return (dtTileRef)encodePolyId(tile->salt, index, 0);
}
/// @par
///
/// Example use case:
/// @code
///
/// const dtPolyRef base = navmesh->getPolyRefBase(tile);
/// for (int i = 0; i < tile->header->polyCount; ++i)
/// {
/// const dtPoly* p = &tile->polys[i];
/// const dtPolyRef ref = base | (dtPolyRef)i;
///
/// // Use the reference to access the polygon data.
/// }
/// @endcode
// Returns the base polygon reference for 'tile' (salt + tile index with
// polygon index 0); callers OR in the per-polygon index. 0 for a null tile.
dtPolyRef dtNavMesh::getPolyRefBase(const dtMeshTile* tile) const
{
	if (tile == 0)
		return 0;
	const unsigned int index = (unsigned int)(tile - m_tiles);
	return encodePolyId(tile->salt, index, 0);
}
// Serialized header written at the start of a tile-state blob by
// storeTileState() and checked by restoreTileState().
struct dtTileState
{
int magic; // Magic number, used to identify the data.
int version; // Data version number.
dtTileRef ref; // Tile ref at the time of storing the data.
};
// Per-polygon mutable state captured in a tile-state blob (one record per
// polygon, following the dtTileState header).
struct dtPolyState
{
unsigned short flags; // Flags (see dtPolyFlags).
unsigned char area; // Area ID of the polygon.
};
/// @see #storeTileState
// Returns the buffer size needed by storeTileState() for 'tile':
// one aligned dtTileState header plus one aligned dtPolyState array.
// Returns 0 for a null tile.
int dtNavMesh::getTileStateSize(const dtMeshTile* tile) const
{
	if (!tile)
		return 0;
	return dtAlign4(sizeof(dtTileState)) +
		dtAlign4(sizeof(dtPolyState) * tile->header->polyCount);
}
/// @par
///
/// Tile state includes non-structural data such as polygon flags, area ids, etc.
/// @note The state data is only valid until the tile reference changes.
/// @see #getTileStateSize, #restoreTileState
// Serializes the tile's mutable per-polygon state (flags, area ids) into
// 'data'. Fails with DT_BUFFER_TOO_SMALL rather than writing a partial blob.
dtStatus dtNavMesh::storeTileState(const dtMeshTile* tile, unsigned char* data, const int maxDataSize) const
{
	// Make sure there is enough space to store the state.
	if (maxDataSize < getTileStateSize(tile))
		return DT_FAILURE | DT_BUFFER_TOO_SMALL;

	// Blob layout: aligned dtTileState header, then one dtPolyState per poly.
	dtTileState* tileState = (dtTileState*)data;
	dtPolyState* polyStates = (dtPolyState*)(data + dtAlign4(sizeof(dtTileState)));

	// Store tile state.
	tileState->magic = DT_NAVMESH_STATE_MAGIC;
	tileState->version = DT_NAVMESH_STATE_VERSION;
	tileState->ref = getTileRef(tile);

	// Snapshot the mutable per-polygon attributes.
	for (int i = 0; i < tile->header->polyCount; ++i)
	{
		const dtPoly* poly = &tile->polys[i];
		polyStates[i].flags = poly->flags;
		polyStates[i].area = poly->getArea();
	}

	return DT_SUCCESS;
}
/// @par
///
/// Tile state includes non-structural data such as polygon flags, area ids, etc.
/// @note This function does not impact the tile's #dtTileRef and #dtPolyRef's.
/// @see #storeTileState
// Restores per-polygon flags and area ids previously captured with
// storeTileState(). The blob must match this exact tile (same ref), format
// magic and version; the tile's structure is not touched.
dtStatus dtNavMesh::restoreTileState(dtMeshTile* tile, const unsigned char* data, const int maxDataSize)
{
// Make sure there is enough space to store the state.
const int sizeReq = getTileStateSize(tile);
if (maxDataSize < sizeReq)
return DT_FAILURE | DT_INVALID_PARAM;
// Blob layout mirrors storeTileState: header, then per-poly records.
const dtTileState* tileState = (const dtTileState*)data; data += dtAlign4(sizeof(dtTileState));
const dtPolyState* polyStates = (const dtPolyState*)data; data += dtAlign4(sizeof(dtPolyState) * tile->header->polyCount);
// Check that the restore is possible.
if (tileState->magic != DT_NAVMESH_STATE_MAGIC)
return DT_FAILURE | DT_WRONG_MAGIC;
if (tileState->version != DT_NAVMESH_STATE_VERSION)
return DT_FAILURE | DT_WRONG_VERSION;
// A ref mismatch means the tile was replaced since the state was stored.
if (tileState->ref != getTileRef(tile))
return DT_FAILURE | DT_INVALID_PARAM;
// Restore per poly state.
for (int i = 0; i < tile->header->polyCount; ++i)
{
dtPoly* p = &tile->polys[i];
const dtPolyState* s = &polyStates[i];
p->flags = s->flags;
p->setArea(s->area);
}
return DT_SUCCESS;
}
/// @par
///
/// Off-mesh connections are stored in the navigation mesh as special 2-vertex
/// polygons with a single edge. At least one of the vertices is expected to be
/// inside a normal polygon. So an off-mesh connection is "entered" from a
/// normal polygon at one of its endpoints. This is the polygon identified by
/// the prevRef parameter.
// Returns the two end points of the off-mesh connection 'polyRef', oriented
// so that startPos is the end nearest the polygon 'prevRef' the agent is
// entering from. See the doxygen comment above.
dtStatus dtNavMesh::getOffMeshConnectionPolyEndPoints(dtPolyRef prevRef, dtPolyRef polyRef, float* startPos, float* endPos) const
{
unsigned int salt, it, ip;
if (!polyRef)
return DT_FAILURE;
// Get current polygon
decodePolyId(polyRef, salt, it, ip);
if (it >= (unsigned int)m_maxTiles) return DT_FAILURE | DT_INVALID_PARAM;
if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE | DT_INVALID_PARAM;
const dtMeshTile* tile = &m_tiles[it];
if (ip >= (unsigned int)tile->header->polyCount) return DT_FAILURE | DT_INVALID_PARAM;
const dtPoly* poly = &tile->polys[ip];
// Make sure that the current poly is indeed off-mesh link.
if (poly->getType() != DT_POLYTYPE_OFFMESH_CONNECTION)
return DT_FAILURE;
// Figure out which way to hand out the vertices.
int idx0 = 0, idx1 = 1;
// Find link that points to first vertex.
// The link at edge 0 names the ground polygon attached to vertex 0; if
// that polygon is not the one we came from, swap the end points so the
// traversal starts at the side nearest prevRef.
for (unsigned int i = poly->firstLink; i != DT_NULL_LINK; i = tile->links[i].next)
{
if (tile->links[i].edge == 0)
{
if (tile->links[i].ref != prevRef)
{
idx0 = 1;
idx1 = 0;
}
break;
}
}
dtVcopy(startPos, &tile->verts[poly->verts[idx0]*3]);
dtVcopy(endPos, &tile->verts[poly->verts[idx1]*3]);
return DT_SUCCESS;
}
// Returns the dtOffMeshConnection record behind polygon reference 'ref',
// or 0 when the reference is invalid or does not name an off-mesh
// connection polygon.
const dtOffMeshConnection* dtNavMesh::getOffMeshConnectionByRef(dtPolyRef ref) const
{
	if (!ref)
		return 0;
	// Get current polygon
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	// Validate the reference before touching tile data.
	if (it >= (unsigned int)m_maxTiles)
		return 0;
	if (m_tiles[it].salt != salt || m_tiles[it].header == 0)
		return 0;
	const dtMeshTile* tile = &m_tiles[it];
	if (ip >= (unsigned int)tile->header->polyCount)
		return 0;
	// Only off-mesh connection polygons have a dtOffMeshConnection record.
	const dtPoly* poly = &tile->polys[ip];
	if (poly->getType() != DT_POLYTYPE_OFFMESH_CONNECTION)
		return 0;
	// Off-mesh polygons are stored contiguously starting at offMeshBase.
	const unsigned int idx = ip - tile->header->offMeshBase;
	dtAssert(idx < (unsigned int)tile->header->offMeshConCount);
	return &tile->offMeshCons[idx];
}
// Replaces the user flags on the polygon named by 'ref'.
// Fails for a null, out-of-range, or stale reference.
dtStatus dtNavMesh::setPolyFlags(dtPolyRef ref, unsigned short flags)
{
	if (!ref) return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	// Validate tile index, salt and polygon index before writing.
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (m_tiles[it].salt != salt || m_tiles[it].header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	dtMeshTile* tile = &m_tiles[it];
	if (ip >= (unsigned int)tile->header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	// Change flags.
	tile->polys[ip].flags = flags;
	return DT_SUCCESS;
}
// Reads the user flags of the polygon named by 'ref' into *resultFlags.
// Fails for a null, out-of-range, or stale reference.
dtStatus dtNavMesh::getPolyFlags(dtPolyRef ref, unsigned short* resultFlags) const
{
	if (!ref) return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	// Validate tile index, salt and polygon index before reading.
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (m_tiles[it].salt != salt || m_tiles[it].header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	const dtMeshTile* tile = &m_tiles[it];
	if (ip >= (unsigned int)tile->header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	*resultFlags = tile->polys[ip].flags;
	return DT_SUCCESS;
}
// Replaces the area id of the polygon named by 'ref'.
// Fails for a null, out-of-range, or stale reference.
dtStatus dtNavMesh::setPolyArea(dtPolyRef ref, unsigned char area)
{
	if (!ref) return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	// Validate tile index, salt and polygon index before writing.
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (m_tiles[it].salt != salt || m_tiles[it].header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	dtMeshTile* tile = &m_tiles[it];
	if (ip >= (unsigned int)tile->header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	tile->polys[ip].setArea(area);
	return DT_SUCCESS;
}
// Reads the area id of the polygon named by 'ref' into *resultArea.
// Fails for a null, out-of-range, or stale reference.
dtStatus dtNavMesh::getPolyArea(dtPolyRef ref, unsigned char* resultArea) const
{
	if (!ref) return DT_FAILURE;
	unsigned int salt, it, ip;
	decodePolyId(ref, salt, it, ip);
	// Validate tile index, salt and polygon index before reading.
	if (it >= (unsigned int)m_maxTiles)
		return DT_FAILURE | DT_INVALID_PARAM;
	if (m_tiles[it].salt != salt || m_tiles[it].header == 0)
		return DT_FAILURE | DT_INVALID_PARAM;
	const dtMeshTile* tile = &m_tiles[it];
	if (ip >= (unsigned int)tile->header->polyCount)
		return DT_FAILURE | DT_INVALID_PARAM;
	*resultArea = tile->polys[ip].getArea();
	return DT_SUCCESS;
}
| gpl-2.0 |
chrisc93/android_kernel_samsung_jf | drivers/interceptor/linux_mutex.c | 631 | 2544 | /* Netfilter Driver for IPSec VPN Client
*
* Copyright(c) 2012 Samsung Electronics
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* linux_mutex.c
*
* Linux interceptor kernel mutex API implementation.
*
*/
#include "linux_internal.h"
#include "linux_mutex_internal.h"
extern SshInterceptor ssh_interceptor_context;
/* Initializes a pre-allocated mutex. Cannot currently fail; always
   returns TRUE. */
Boolean ssh_kernel_mutex_init(SshKernelMutex mutex)
{
spin_lock_init(&mutex->lock);
#ifdef DEBUG_LIGHT
/* Debug builds track held/free state and acquisition time. */
mutex->taken = FALSE;
mutex->jiffies = 0;
#endif
return TRUE;
}
/* Allocates a simple mutex. This should be as fast as possible, but work
between different processors in a multiprocessor machine. This need
not work between different independent processes. */
/* Allocates and initializes a simple mutex. Returns NULL on allocation
   or initialization failure; the caller frees it with
   ssh_kernel_mutex_free(). */
SshKernelMutex
ssh_kernel_mutex_alloc(void)
{
  SshKernelMutex mutex = ssh_calloc(1, sizeof(struct SshKernelMutexRec));

  if (mutex == NULL)
    return NULL;

  /* Initialization cannot currently fail, but honor its contract anyway. */
  if (!ssh_kernel_mutex_init(mutex))
    {
      ssh_free(mutex);
      return NULL;
    }

  return mutex;
}
/* Tears down a mutex. The mutex must not be held when it is uninitialized;
   releasing the storage itself is the caller's responsibility. */
void ssh_kernel_mutex_uninit(SshKernelMutex mutex)
{
SSH_ASSERT(!mutex->taken);
}
/* Uninitializes and frees a mutex allocated with ssh_kernel_mutex_alloc().
   A NULL argument is a no-op. The mutex must not be held. */
void ssh_kernel_mutex_free(SshKernelMutex mutex)
{
  if (mutex == NULL)
    return;

  ssh_kernel_mutex_uninit(mutex);
  ssh_free(mutex);
}
#ifdef KERNEL_MUTEX_USE_FUNCTIONS
/* Locks the mutex. Only one thread of execution can have a mutex locked
at a time. This will block until execution can continue. One should
not keep mutexes locked for extended periods of time. */
/* Acquires the mutex, spinning until it is available. The lock is
   non-recursive: a debug build asserts it was free on entry. */
void
ssh_kernel_mutex_lock_i(SshKernelMutex mutex)
{
SSH_LINUX_STATISTICS(ssh_interceptor_context,
{ ssh_interceptor_context->stats.num_light_locks++; });
spin_lock(&mutex->lock);
SSH_ASSERT(!mutex->taken);
#ifdef DEBUG_LIGHT
mutex->taken = TRUE;
/* Record acquisition time for diagnosing long hold times. */
mutex->jiffies = jiffies;
#endif /* DEBUG_LIGHT */
}
/* Unlocks the mutex. If other threads are waiting to lock the mutex,
one of them will get the lock and continue execution. */
void
ssh_kernel_mutex_unlock_i(SshKernelMutex mutex)
{
SSH_ASSERT(mutex->taken);
#ifdef DEBUG_LIGHT
mutex->taken = FALSE;
#endif /* DEBUG_LIGHT */
spin_unlock(&mutex->lock);
}
#endif /* KERNEL_MUTEX_USE_FUNCTIONS */
#ifdef DEBUG_LIGHT
/* Check that the mutex is locked. It is a fatal error if it is not. */
/* Debug-only sanity check: fatal (assert) if the mutex is not held. */
void
ssh_kernel_mutex_assert_is_locked(SshKernelMutex mutex)
{
SSH_ASSERT(mutex->taken);
}
#endif /* DEBUG_LIGHT */
| gpl-2.0 |
shesselba/linux-dove | fs/reiserfs/xattr_user.c | 631 | 1292 | #include "reiserfs.h"
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "xattr.h"
#include <linux/uaccess.h>
/*
 * Gets a "user." extended attribute. The name is expected to still carry
 * the "user." prefix (the sizeof check requires at least one character
 * beyond the bare prefix).
 */
static int
user_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
int handler_flags)
{
if (strlen(name) < sizeof(XATTR_USER_PREFIX))
return -EINVAL;
/* user xattrs can be disabled for this superblock (mount option). */
if (!reiserfs_xattrs_user(dentry->d_sb))
return -EOPNOTSUPP;
return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
}
/*
 * Sets a "user." extended attribute; same name-prefix and mount-option
 * checks as user_get().
 */
static int
user_set(struct dentry *dentry, const char *name, const void *buffer,
size_t size, int flags, int handler_flags)
{
if (strlen(name) < sizeof(XATTR_USER_PREFIX))
return -EINVAL;
/* user xattrs can be disabled for this superblock (mount option). */
if (!reiserfs_xattrs_user(dentry->d_sb))
return -EOPNOTSUPP;
return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
}
/*
 * Reports one "user." attribute name for listxattr. Copies the name (with
 * terminating NUL) into 'list' when it fits, and always returns the space
 * the entry needs; returns 0 when user xattrs are disabled.
 */
static size_t user_list(struct dentry *dentry, char *list, size_t list_size,
			const char *name, size_t name_len, int handler_flags)
{
	const size_t total = name_len + 1;	/* room for the trailing NUL */

	if (!reiserfs_xattrs_user(dentry->d_sb))
		return 0;

	if (list != NULL && total <= list_size) {
		memcpy(list, name, name_len);
		list[name_len] = '\0';
	}
	return total;
}
/* Handler table for "user."-prefixed extended attributes, registered
 * with the reiserfs xattr core.  handler_flags is unused by these
 * callbacks.
 */
const struct xattr_handler reiserfs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.get = user_get,
	.set = user_set,
	.list = user_list,
};
/* ---- net/l2tp/l2tp_ppp.c (GPL-2.0) ---- */
/*****************************************************************************
* Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
*
* PPPoX --- Generic PPP encapsulation socket family
* PPPoL2TP --- PPP over L2TP (RFC 2661)
*
* Version: 2.0.0
*
* Authors: James Chapman (jchapman@katalix.com)
*
* Based on original work by Martijn van Oosterhout <kleptog@svana.org>
*
* License:
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
/* This driver handles only L2TP data frames; control frames are handled by a
* userspace application.
*
* To send data in an L2TP session, userspace opens a PPPoL2TP socket and
* attaches it to a bound UDP socket with local tunnel_id / session_id and
* peer tunnel_id / session_id set. Data can then be sent or received using
* regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
* can be read or modified using ioctl() or [gs]etsockopt() calls.
*
* When a PPPoL2TP socket is connected with local and peer session_id values
* zero, the socket is treated as a special tunnel management socket.
*
* Here's example userspace code to create a socket for sending/receiving data
* over an L2TP session:-
*
* struct sockaddr_pppol2tp sax;
* int fd;
* int session_fd;
*
* fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
*
* sax.sa_family = AF_PPPOX;
* sax.sa_protocol = PX_PROTO_OL2TP;
* sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
* sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
* sax.pppol2tp.addr.sin_port = addr->sin_port;
* sax.pppol2tp.addr.sin_family = AF_INET;
* sax.pppol2tp.s_tunnel = tunnel_id;
* sax.pppol2tp.s_session = session_id;
* sax.pppol2tp.d_tunnel = peer_tunnel_id;
* sax.pppol2tp.d_session = peer_session_id;
*
* session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
*
* A pppd plugin that allows PPP traffic to be carried over L2TP using
* this driver is available from the OpenL2TP project at
* http://openl2tp.sourceforge.net.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>
#include <net/sock.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/file.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/proc_fs.h>
#include <linux/l2tp.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/xfrm.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include "l2tp_core.h"
#define PPPOL2TP_DRV_VERSION "V2.0"
/* Space for UDP, L2TP and PPP headers */
#define PPPOL2TP_HEADER_OVERHEAD 40
#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
do { \
if ((_mask) & (_type)) \
printk(_lvl "PPPOL2TP: " _fmt, ##args); \
} while (0)
/* Number of bytes to build transmit L2TP headers.
* Unfortunately the size is different depending on whether sequence numbers
* are enabled.
*/
#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
/* Private data of each session. This data lives at the end of struct
* l2tp_session, referenced via session->priv[].
*/
struct pppol2tp_session {
int owner; /* pid that opened the socket */
struct sock *sock; /* Pointer to the session
* PPPoX socket */
struct sock *tunnel_sock; /* Pointer to the tunnel UDP
* socket */
int flags; /* accessed by PPPIOCGFLAGS.
* Unused. */
};
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static const struct ppp_channel_ops pppol2tp_chan_ops = {
.start_xmit = pppol2tp_xmit,
};
static const struct proto_ops pppol2tp_ops;
/* Helpers to obtain tunnel/session contexts from sockets.
 *
 * Looks up the l2tp_session attached to a PPPoX socket.  On success a
 * reference is held on the socket; the caller must sock_put() it when
 * done with the session.  Returns NULL (with no reference held) when
 * the socket is NULL or has no session attached.
 */
static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
{
	struct l2tp_session *session;

	if (sk == NULL)
		return NULL;

	sock_hold(sk);
	session = (struct l2tp_session *)(sk->sk_user_data);
	if (session == NULL) {
		sock_put(sk);
		return NULL;
	}

	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

	return session;
}
/*****************************************************************************
* Receive data handling
*****************************************************************************/
/* Strip the optional PPP address/control bytes (ff 03) from a received
 * frame.  Returns 0 to continue processing the packet, non-zero to make
 * the caller drop it.
 */
static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
{
	/* Some clients (notably Microsoft ones with PPP header compression
	 * enabled) omit the PPP header, others include it; accept both.
	 * The two bytes are compared individually rather than through a
	 * u16 pointer because skb->data may be unaligned here.
	 */
	if (pskb_may_pull(skb, 2)) {
		if (skb->data[0] == 0xff && skb->data[1] == 0x03)
			skb_pull(skb, 2);

		return 0;
	}

	return 1;
}
/* Receive message. This is the recvmsg for the PPPoL2TP socket.
 * Dequeues one datagram from the socket receive queue and copies it to
 * the caller's iovec.  Returns bytes copied, or a negative errno.
 */
static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len,
			    int flags)
{
	int err;
	struct sk_buff *skb;
	struct sock *sk = sock->sk;

	/* A PPPOX_BOUND socket delivers frames into PPP, not to
	 * userspace, so recvmsg() is not meaningful on it.
	 */
	err = -EIO;
	if (sk->sk_state & PPPOX_BOUND)
		goto end;

	/* No source address is ever reported for this family. */
	msg->msg_namelen = 0;

	err = 0;
	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto end;

	/* Truncate to the caller's buffer and flag any lost tail. */
	if (len > skb->len)
		len = skb->len;
	else if (len < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
	if (likely(err == 0))
		err = len;	/* report bytes copied on success */

	kfree_skb(skb);
end:
	return err;
}
/* Session receive callback (installed as session->recv_skb): hand a
 * reassembled data frame to the PPP channel if the session socket is
 * bound, otherwise drop it.
 */
static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct sock *sk = NULL;

	/* If the socket is bound, send it in to PPP's input queue. Otherwise
	 * queue it on the session socket.
	 */
	sk = ps->sock;
	if (sk == NULL)
		goto no_sock;

	if (sk->sk_state & PPPOX_BOUND) {
		struct pppox_sock *po;
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv %d byte data frame, passing to ppp\n",
		       session->name, data_len);

		/* We need to forget all info related to the L2TP packet
		 * gathered in the skb as we are going to reuse the same
		 * skb for the inner packet.
		 * Namely we need to:
		 * - reset xfrm (IPSec) information as it applies to
		 *   the outer L2TP packet and not to the inner one
		 * - release the dst to force a route lookup on the inner
		 *   IP packet since skb->dst currently points to the dst
		 *   of the UDP tunnel
		 * - reset netfilter information as it doesn't apply
		 *   to the inner packet either
		 */
		secpath_reset(skb);
		skb_dst_drop(skb);
		nf_reset(skb);

		po = pppox_sk(sk);
		ppp_input(&po->chan, skb);
	} else {
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
		       "%s: socket not bound\n", session->name);

		/* Not bound. Nothing we can do, so discard. */
		session->stats.rx_errors++;
		kfree_skb(skb);
	}

	return;

no_sock:
	PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
	       "%s: no socket\n", session->name);
	kfree_skb(skb);
}
static void pppol2tp_session_sock_hold(struct l2tp_session *session)
{
struct pppol2tp_session *ps = l2tp_session_priv(session);
if (ps->sock)
sock_hold(ps->sock);
}
static void pppol2tp_session_sock_put(struct l2tp_session *session)
{
struct pppol2tp_session *ps = l2tp_session_priv(session);
if (ps->sock)
sock_put(ps->sock);
}
/************************************************************************
* Transmit handling
***********************************************************************/
/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket.  We come here
 * when a user application does a sendmsg() on the session socket. L2TP and
 * PPP headers must be inserted into the user's data.
 *
 * Returns the number of bytes queued on success, or a negative errno.
 */
static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
			    size_t total_len)
{
	static const unsigned char ppph[2] = { 0xff, 0x03 };
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int uhlen;

	error = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto error;

	/* Get session and tunnel contexts.  pppol2tp_sock_to_session()
	 * takes a reference on sk which must be dropped on every exit
	 * path below, including success.
	 */
	error = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto error;

	ps = l2tp_session_priv(session);
	tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
	if (tunnel == NULL)
		goto error_put_sess;

	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;

	/* Allocate a socket buffer large enough for the payload plus all
	 * the headers the transmit path will prepend.
	 */
	error = -ENOMEM;
	skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
			   uhlen + session->hdr_len +
			   sizeof(ppph) + total_len,
			   0, GFP_KERNEL);
	if (!skb)
		goto error_put_sess_tun;

	/* Reserve space for headers. */
	skb_reserve(skb, NET_SKB_PAD);
	skb_reset_network_header(skb);
	skb_reserve(skb, sizeof(struct iphdr));
	skb_reset_transport_header(skb);
	skb_reserve(skb, uhlen);

	/* Add PPP header (address 0xff, control 0x03). */
	skb->data[0] = ppph[0];
	skb->data[1] = ppph[1];
	skb_put(skb, 2);

	/* Copy user data into skb */
	error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
	if (error < 0) {
		kfree_skb(skb);
		goto error_put_sess_tun;
	}
	skb_put(skb, total_len);

	l2tp_xmit_skb(session, skb, session->hdr_len);

	sock_put(ps->tunnel_sock);
	/* Fix: drop the reference taken by pppol2tp_sock_to_session();
	 * the original success path leaked it.
	 */
	sock_put(sk);

	/* Fix: sendmsg() must report the number of bytes queued; the
	 * original returned 'error', which is 0 after a successful
	 * memcpy_fromiovec().
	 */
	return total_len;

error_put_sess_tun:
	sock_put(ps->tunnel_sock);
error_put_sess:
	sock_put(sk);
error:
	return error;
}
/* Transmit function called by generic PPP driver. Sends PPP frame
 * over PPPoL2TP socket.
 *
 * This is almost the same as pppol2tp_sendmsg(), but rather than
 * being called with a msghdr from userspace, it is called with a skb
 * from the kernel.
 *
 * The supplied skb from ppp doesn't have enough headroom for the
 * insertion of L2TP, UDP and IP headers so we need to allocate more
 * headroom in the skb. This will create a cloned skb. But we must be
 * careful in the error case because the caller will expect to free
 * the skb it supplied, not our cloned skb. So we take care to always
 * leave the original skb unfreed if we return an error.
 */
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	static const u8 ppph[2] = { 0xff, 0x03 };
	struct sock *sk = (struct sock *) chan->private;
	struct sock *sk_tun;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int old_headroom;
	int new_headroom;
	int uhlen, headroom;

	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto abort;

	/* Get session and tunnel contexts from the socket.  The session
	 * lookup takes a reference on sk which is dropped on every path
	 * below.
	 */
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto abort;

	ps = l2tp_session_priv(session);
	sk_tun = ps->tunnel_sock;
	if (sk_tun == NULL)
		goto abort_put_sess;
	tunnel = l2tp_sock_to_tunnel(sk_tun);
	if (tunnel == NULL)
		goto abort_put_sess;

	old_headroom = skb_headroom(skb);
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
	headroom = NET_SKB_PAD +
		   sizeof(struct iphdr) + /* IP header */
		   uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */
		   session->hdr_len + /* L2TP header */
		   sizeof(ppph); /* PPP header */
	if (skb_cow_head(skb, headroom))
		goto abort_put_sess_tun;

	/* skb_cow_head() may have reallocated; account for the extra
	 * headroom in the skb's true size.
	 */
	new_headroom = skb_headroom(skb);
	skb->truesize += new_headroom - old_headroom;

	/* Setup PPP header */
	__skb_push(skb, sizeof(ppph));
	skb->data[0] = ppph[0];
	skb->data[1] = ppph[1];

	l2tp_xmit_skb(session, skb, session->hdr_len);

	sock_put(sk_tun);
	sock_put(sk);

	return 1;

abort_put_sess_tun:
	sock_put(sk_tun);
abort_put_sess:
	sock_put(sk);
abort:
	/* Free the original skb */
	kfree_skb(skb);
	/* Always report "consumed" to PPP, even on error. */
	return 1;
}
/*****************************************************************************
* Session (and tunnel control) socket create/destroy.
*****************************************************************************/
/* Called by l2tp_core when a session socket is being closed.
 * Unbinds the PPPoX socket, marks it dead, and purges any data queued
 * on the socket or the session reorder queue.
 */
static void pppol2tp_session_close(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct sock *sk = ps->sock;
	struct sk_buff *skb;

	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

	/* session_id == 0 is the special tunnel-management pseudo
	 * session; nothing to tear down for it here.
	 */
	if (session->session_id == 0)
		goto out;

	if (sk != NULL) {
		lock_sock(sk);

		if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
			pppox_unbind_sock(sk);
			sk->sk_state = PPPOX_DEAD;
			sk->sk_state_change(sk);
		}

		/* Purge any queued data */
		skb_queue_purge(&sk->sk_receive_queue);
		skb_queue_purge(&sk->sk_write_queue);
		/* Each skb on the reorder queue holds a socket reference
		 * (see pppol2tp_session_sock_hold); drop it per skb.
		 */
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			sock_put(sk);
		}

		release_sock(sk);
	}

out:
	return;
}
/* Really kill the session socket. (Called from sock_put() if
 * refcnt == 0.)  Detaches the session from the socket and drops the
 * reference the socket held on it.
 *
 * Fix: the original re-tested 'session' for NULL inside an
 * 'sk->sk_user_data != NULL' branch; that inner test could never be
 * true and has been removed.
 */
static void pppol2tp_session_destruct(struct sock *sk)
{
	struct l2tp_session *session = sk->sk_user_data;

	if (session != NULL) {
		sk->sk_user_data = NULL;
		BUG_ON(session->magic != L2TP_SESSION_MAGIC);
		l2tp_session_dec_refcount(session);
	}
}
/* Called when the PPPoX socket (session) is closed.
 * Orphans the socket, flushes queued data, and drops the socket
 * references held by the reorder queue and by the session lookup.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	/* Takes a reference on sk when a session is attached; released
	 * below once the reorder queue has been drained.
	 */
	session = pppol2tp_sock_to_session(sk);

	/* Purge any queued data */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
	if (session != NULL) {
		struct sk_buff *skb;
		/* Each queued skb holds its own socket reference. */
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			sock_put(sk);
		}
		sock_put(sk);
	}

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}
/* Protocol descriptor for PPPoL2TP sockets.  obj_size covers the
 * embedding struct pppox_sock so pppox_sk() works on these sockets.
 */
static struct proto pppol2tp_sk_proto = {
	.name	  = "PPPOL2TP",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};
/* Socket backlog receive handler: feed the packet to the L2TP UDP
 * encapsulation receive path, freeing the skb ourselves if that path
 * rejects it.  Backlog handlers always report success.
 */
static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
	if (l2tp_udp_encap_recv(sk, skb) != 0)
		kfree_skb(skb);

	return NET_RX_SUCCESS;
}
/* socket() handler. Initialize a new struct sock.
*/
static int pppol2tp_create(struct net *net, struct socket *sock)
{
int error = -ENOMEM;
struct sock *sk;
sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
if (!sk)
goto out;
sock_init_data(sock, sk);
sock->state = SS_UNCONNECTED;
sock->ops = &pppol2tp_ops;
sk->sk_backlog_rcv = pppol2tp_backlog_recv;
sk->sk_protocol = PX_PROTO_OL2TP;
sk->sk_family = PF_PPPOX;
sk->sk_state = PPPOX_NONE;
sk->sk_type = SOCK_STREAM;
sk->sk_destruct = pppol2tp_session_destruct;
error = 0;
out:
return error;
}
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
/* debugfs "show" callback (session->show): print the PPP interface name
 * associated with this session, if a PPPoX socket is attached.
 */
static void pppol2tp_show(struct seq_file *m, void *arg)
{
	struct l2tp_session *session = arg;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	if (ps) {
		struct pppox_sock *po = pppox_sk(ps->sock);
		if (po)
			seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
	}
}
#endif
/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket.
 * Handles both L2TPv2 and L2TPv3 sockaddr layouts, creates tunnel
 * and/or session contexts on demand, and registers a PPP channel for
 * regular data sessions (session_id != 0).
 */
static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
			    int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
	struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct l2tp_session *session = NULL;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	struct dst_entry *dst;
	struct l2tp_session_cfg cfg = { 0, };
	int error = 0;
	u32 tunnel_id, peer_tunnel_id;
	u32 session_id, peer_session_id;
	int ver = 2;
	int fd;

	lock_sock(sk);

	error = -EINVAL;
	if (sp->sa_protocol != PX_PROTO_OL2TP)
		goto end;

	/* Check for already bound sockets */
	error = -EBUSY;
	if (sk->sk_state & PPPOX_CONNECTED)
		goto end;

	/* We don't supporting rebinding anyway */
	error = -EALREADY;
	if (sk->sk_user_data)
		goto end; /* socket is already attached */

	/* Get params from socket address. Handle L2TPv2 and L2TPv3.
	 * The protocol version is inferred from the sockaddr size.
	 */
	if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
		fd = sp->pppol2tp.fd;
		tunnel_id = sp->pppol2tp.s_tunnel;
		peer_tunnel_id = sp->pppol2tp.d_tunnel;
		session_id = sp->pppol2tp.s_session;
		peer_session_id = sp->pppol2tp.d_session;
	} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
		ver = 3;
		fd = sp3->pppol2tp.fd;
		tunnel_id = sp3->pppol2tp.s_tunnel;
		peer_tunnel_id = sp3->pppol2tp.d_tunnel;
		session_id = sp3->pppol2tp.s_session;
		peer_session_id = sp3->pppol2tp.d_session;
	} else {
		error = -EINVAL;
		goto end; /* bad socket address */
	}

	/* Don't bind if tunnel_id is 0 */
	error = -EINVAL;
	if (tunnel_id == 0)
		goto end;

	tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);

	/* Special case: create tunnel context if session_id and
	 * peer_session_id is 0. Otherwise look up tunnel using supplied
	 * tunnel id.
	 */
	if ((session_id == 0) && (peer_session_id == 0)) {
		if (tunnel == NULL) {
			struct l2tp_tunnel_cfg tcfg = {
				.encap = L2TP_ENCAPTYPE_UDP,
				.debug = 0,
			};
			error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
			if (error < 0)
				goto end;
		}
	} else {
		/* Error if we can't find the tunnel */
		error = -ENOENT;
		if (tunnel == NULL)
			goto end;

		/* Error if socket is not prepped */
		if (tunnel->sock == NULL)
			goto end;
	}

	/* First session on this tunnel installs the PPP-header strip
	 * hook for all subsequent receives.
	 */
	if (tunnel->recv_payload_hook == NULL)
		tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;

	if (tunnel->peer_tunnel_id == 0) {
		if (ver == 2)
			tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
		else
			tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
	}

	/* Create session if it doesn't already exist. We handle the
	 * case where a session was previously created by the netlink
	 * interface by checking that the session doesn't already have
	 * a socket and its tunnel socket are what we expect. If any
	 * of those checks fail, return EEXIST to the caller.
	 */
	session = l2tp_session_find(sock_net(sk), tunnel, session_id);
	if (session == NULL) {
		/* Default MTU must allow space for UDP/L2TP/PPP
		 * headers.
		 */
		cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;

		/* Allocate and initialize a new session context. */
		session = l2tp_session_create(sizeof(struct pppol2tp_session),
					      tunnel, session_id,
					      peer_session_id, &cfg);
		if (session == NULL) {
			error = -ENOMEM;
			goto end;
		}
	} else {
		ps = l2tp_session_priv(session);
		error = -EEXIST;
		if (ps->sock != NULL)
			goto end;

		/* consistency checks */
		if (ps->tunnel_sock != tunnel->sock)
			goto end;
	}

	/* Associate session with its PPPoL2TP socket */
	ps = l2tp_session_priv(session);
	ps->owner = current->pid;
	ps->sock = sk;
	ps->tunnel_sock = tunnel->sock;

	session->recv_skb = pppol2tp_recv;
	session->session_close = pppol2tp_session_close;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show = pppol2tp_show;
#endif

	/* We need to know each time a skb is dropped from the reorder
	 * queue.
	 */
	session->ref = pppol2tp_session_sock_hold;
	session->deref = pppol2tp_session_sock_put;

	/* If PMTU discovery was enabled, use the MTU that was discovered */
	dst = sk_dst_get(sk);
	if (dst != NULL) {
		u32 pmtu = dst_mtu(__sk_dst_get(sk));
		if (pmtu != 0)
			session->mtu = session->mru = pmtu -
				PPPOL2TP_HEADER_OVERHEAD;
		dst_release(dst);
	}

	/* Special case: if source & dest session_id == 0x0000, this
	 * socket is being created to manage the tunnel. Just set up
	 * the internal context for use by ioctl() and sockopt()
	 * handlers.
	 */
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		error = 0;
		goto out_no_ppp;
	}

	/* The only header we need to worry about is the L2TP
	 * header. This size is different depending on whether
	 * sequence numbers are enabled for the data channel.
	 */
	po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;

	po->chan.private = sk;
	po->chan.ops = &pppol2tp_chan_ops;
	po->chan.mtu = session->mtu;

	error = ppp_register_net_channel(sock_net(sk), &po->chan);
	if (error)
		goto end;

out_no_ppp:
	/* This is how we get the session context from the socket. */
	sk->sk_user_data = session;
	sk->sk_state = PPPOX_CONNECTED;
	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

end:
	release_sock(sk);

	return error;
}
#ifdef CONFIG_L2TP_V3
/* Called when creating sessions via the netlink interface.
 * Creates the session context on an existing tunnel; no PPPoX socket
 * is attached at this point (ps->sock stays NULL until connect()).
 */
static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	int error;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct pppol2tp_session *ps;

	tunnel = l2tp_tunnel_find(net, tunnel_id);

	/* Error if we can't find the tunnel */
	error = -ENOENT;
	if (tunnel == NULL)
		goto out;

	/* Error if tunnel socket is not prepped */
	if (tunnel->sock == NULL)
		goto out;

	/* Check that this session doesn't already exist */
	error = -EEXIST;
	session = l2tp_session_find(net, tunnel, session_id);
	if (session != NULL)
		goto out;

	/* Default MTU values. */
	if (cfg->mtu == 0)
		cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
	if (cfg->mru == 0)
		cfg->mru = cfg->mtu;

	/* Allocate and initialize a new session context. */
	error = -ENOMEM;
	session = l2tp_session_create(sizeof(struct pppol2tp_session),
				      tunnel, session_id,
				      peer_session_id, cfg);
	if (session == NULL)
		goto out;

	ps = l2tp_session_priv(session);
	ps->tunnel_sock = tunnel->sock;

	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

	error = 0;

out:
	return error;
}
/* Called when deleting sessions via the netlink interface.
 * When no PPPoX socket ever attached there is no socket destructor to
 * release the session reference, so drop it here; otherwise the
 * socket's destructor will do it.
 */
static int pppol2tp_session_delete(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	if (!ps->sock)
		l2tp_session_dec_refcount(session);

	return 0;
}
#endif /* CONFIG_L2TP_V3 */
/* getname() support.
 * Fills in a sockaddr_pppol2tp (v2) or sockaddr_pppol2tpv3 (v3) for the
 * connected session.  Returns 0 on success or a negative errno.
 *
 * Fix: 'error = 0' previously sat after the end_put_sess label, so the
 * tunnel-lookup failure path (-EBADF) was overwritten and the call
 * wrongly reported success.  It now runs only on the success path.
 */
static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
			    int *usockaddr_len, int peer)
{
	int len = 0;
	int error = 0;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct sock *sk = sock->sk;
	struct inet_sock *inet;
	struct pppol2tp_session *pls;

	error = -ENOTCONN;
	if (sk == NULL)
		goto end;
	if (sk->sk_state != PPPOX_CONNECTED)
		goto end;

	error = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	pls = l2tp_session_priv(session);
	tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock);
	if (tunnel == NULL) {
		error = -EBADF;
		goto end_put_sess;
	}

	inet = inet_sk(tunnel->sock);
	if (tunnel->version == 2) {
		struct sockaddr_pppol2tp sp;
		len = sizeof(sp);
		memset(&sp, 0, len);
		sp.sa_family = AF_PPPOX;
		sp.sa_protocol = PX_PROTO_OL2TP;
		sp.pppol2tp.fd = tunnel->fd;
		sp.pppol2tp.pid = pls->owner;
		sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
		sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
		sp.pppol2tp.s_session = session->session_id;
		sp.pppol2tp.d_session = session->peer_session_id;
		sp.pppol2tp.addr.sin_family = AF_INET;
		sp.pppol2tp.addr.sin_port = inet->inet_dport;
		sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
		memcpy(uaddr, &sp, len);
	} else if (tunnel->version == 3) {
		struct sockaddr_pppol2tpv3 sp;
		len = sizeof(sp);
		memset(&sp, 0, len);
		sp.sa_family = AF_PPPOX;
		sp.sa_protocol = PX_PROTO_OL2TP;
		sp.pppol2tp.fd = tunnel->fd;
		sp.pppol2tp.pid = pls->owner;
		sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
		sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
		sp.pppol2tp.s_session = session->session_id;
		sp.pppol2tp.d_session = session->peer_session_id;
		sp.pppol2tp.addr.sin_family = AF_INET;
		sp.pppol2tp.addr.sin_port = inet->inet_dport;
		sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
		memcpy(uaddr, &sp, len);
	}

	*usockaddr_len = len;
	error = 0;

	sock_put(pls->tunnel_sock);
end_put_sess:
	sock_put(sk);
end:
	return error;
}
/****************************************************************************
* ioctl() handlers.
*
* The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
* sockets. However, in order to control kernel tunnel features, we allow
* userspace to create a special "tunnel" PPPoX socket which is used for
* control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
* the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
* calls.
****************************************************************************/
/* Copy the kernel's l2tp_stats counters into the userspace-facing
 * pppol2tp_ioc_stats layout.  tunnel_id/session_id fields of 'dest'
 * are filled in by the caller.
 */
static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
				struct l2tp_stats *stats)
{
	dest->tx_packets = stats->tx_packets;
	dest->tx_bytes = stats->tx_bytes;
	dest->tx_errors = stats->tx_errors;
	dest->rx_packets = stats->rx_packets;
	dest->rx_bytes = stats->rx_bytes;
	dest->rx_seq_discards = stats->rx_seq_discards;
	dest->rx_oos_packets = stats->rx_oos_packets;
	dest->rx_errors = stats->rx_errors;
}
/* Session ioctl helper.
 * Implements MTU/MRU get/set, the legacy flags word, and L2TP stats
 * retrieval for a session socket.  Unknown commands return -ENOSYS.
 */
static int pppol2tp_session_ioctl(struct l2tp_session *session,
				  unsigned int cmd, unsigned long arg)
{
	struct ifreq ifr;
	int err = 0;
	struct sock *sk;
	int val = (int) arg;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_ioc_stats stats;

	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
	       session->name, cmd, arg);

	/* Hold the session socket for the duration of the ioctl. */
	sk = ps->sock;
	sock_hold(sk);

	switch (cmd) {
	case SIOCGIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
			break;
		ifr.ifr_mtu = session->mtu;
		if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get mtu=%d\n", session->name, session->mtu);
		err = 0;
		break;

	case SIOCSIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
			break;

		session->mtu = ifr.ifr_mtu;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set mtu=%d\n", session->name, session->mtu);
		err = 0;
		break;

	case PPPIOCGMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (put_user(session->mru, (int __user *) arg))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get mru=%d\n", session->name, session->mru);
		err = 0;
		break;

	case PPPIOCSMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;

		session->mru = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set mru=%d\n", session->name, session->mru);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		/* 'flags' is stored but otherwise unused by this driver. */
		err = -EFAULT;
		if (put_user(ps->flags, (int __user *) arg))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get flags=%d\n", session->name, ps->flags);
		err = 0;
		break;

	case PPPIOCSFLAGS:
		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;
		ps->flags = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set flags=%d\n", session->name, ps->flags);
		err = 0;
		break;

	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		memset(&stats, 0, sizeof(stats));
		stats.tunnel_id = tunnel->tunnel_id;
		stats.session_id = session->session_id;
		pppol2tp_copy_stats(&stats, &session->stats);
		if (copy_to_user((void __user *) arg, &stats,
				 sizeof(stats)))
			break;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", session->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 */
static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk;
	struct pppol2tp_ioc_stats stats;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
	       tunnel->name, cmd, arg);

	/* Hold the tunnel socket for the duration of the ioctl. */
	sk = tunnel->sock;
	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		if (copy_from_user(&stats, (void __user *) arg,
				   sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		if (stats.session_id != 0) {
			/* resend to session ioctl handler */
			struct l2tp_session *session =
				l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd, arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		pppol2tp_copy_stats(&stats, &tunnel->stats);
		if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 * A session whose session_id and peer_session_id are both 0 is the
 * special tunnel-management socket, so its ioctls go to the tunnel
 * helper instead.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket (takes a reference on sk,
	 * dropped at end_put_sess).
	 */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	ps = l2tp_session_priv(session);
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		err = -EBADF;
		tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		sock_put(ps->tunnel_sock);
		goto end_put_sess;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end_put_sess:
	sock_put(sk);
end:
	return err;
}
/*****************************************************************************
* setsockopt() / getsockopt() support.
*
* The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
* sockets. In order to control kernel tunnel features, we allow userspace to
* create a special "tunnel" PPPoX socket which is used for control only.
* Tunnel PPPoX sockets have session_id == 0 and simply allow the user
* application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
*****************************************************************************/
/* Tunnel setsockopt() helper.
 * Only PPPOL2TP_SO_DEBUG is valid at tunnel scope; everything else is
 * rejected with -ENOPROTOOPT.
 */
static int pppol2tp_tunnel_setsockopt(struct sock *sk,
				      struct l2tp_tunnel *tunnel,
				      int optname, int val)
{
	if (optname != PPPOL2TP_SO_DEBUG)
		return -ENOPROTOOPT;

	tunnel->debug = val;
	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: set debug=%x\n", tunnel->name, tunnel->debug);

	return 0;
}
/* Session setsockopt helper.
 * Handles per-session options: RX/TX sequence numbering, LNS mode,
 * debug mask and the reorder timeout.  Boolean options accept only
 * 0 or 1 (stored internally as 0 / -1).
 */
static int pppol2tp_session_setsockopt(struct sock *sk,
				       struct l2tp_session *session,
				       int optname, int val)
{
	int err = 0;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->recv_seq = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set recv_seq=%d\n", session->name, session->recv_seq);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->send_seq = val ? -1 : 0;
		{
			/* The PPP channel header length depends on whether
			 * sequence numbers are sent; keep it in sync.
			 */
			struct sock *ssk = ps->sock;
			struct pppox_sock *po = pppox_sk(ssk);
			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
		}
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set send_seq=%d\n", session->name, session->send_seq);
		break;

	case PPPOL2TP_SO_LNSMODE:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->lns_mode = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set lns_mode=%d\n", session->name, session->lns_mode);
		break;

	case PPPOL2TP_SO_DEBUG:
		session->debug = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set debug=%x\n", session->name, session->debug);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* val is in milliseconds, stored in jiffies.
		 * NOTE(review): the %d below prints the jiffies value and
		 * may truncate if reorder_timeout is wider than int —
		 * confirm against the field's declared type.
		 */
		session->reorder_timeout = msecs_to_jiffies(val);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
/* Main setsockopt() entry point.
 * Does API checks, then calls either the tunnel or session setsockopt
 * handler, according to whether the PPPoL2TP socket is a for a regular
 * session or the special tunnel type.
 *
 * Returns 0 on success or a negative errno (including -ENOPROTOOPT
 * from the helpers for unsupported options).
 */
static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int val;
	int err;

	if (level != SOL_PPPOL2TP)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	err = -ENOTCONN;
	if (sk->sk_user_data == NULL)
		goto end;

	/* Get session context from the socket */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session_id == 0x0000, treat as operation on tunnel
	 */
	ps = l2tp_session_priv(session);
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		err = -EBADF;
		tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
		sock_put(ps->tunnel_sock);
	} else
		err = pppol2tp_session_setsockopt(sk, session, optname, val);

	/* BUGFIX: the old code did "err = 0;" here, which silently masked
	 * errors from the tunnel/session helpers (e.g. -ENOPROTOOPT for
	 * unknown options was reported to userspace as success).  Propagate
	 * the helper's result instead.
	 */

end_put_sess:
	sock_put(sk);
end:
	return err;
}
/* Tunnel getsockopt helper. Called with sock locked.
 *
 * PPPOL2TP_SO_DEBUG is the only tunnel-scope option; everything else
 * is -ENOPROTOOPT.
 */
static int pppol2tp_tunnel_getsockopt(struct sock *sk,
				      struct l2tp_tunnel *tunnel,
				      int optname, int *val)
{
	if (optname != PPPOL2TP_SO_DEBUG)
		return -ENOPROTOOPT;

	*val = tunnel->debug;
	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: get debug=%x\n", tunnel->name, tunnel->debug);

	return 0;
}
/* Session getsockopt helper. Called with sock locked.
 *
 * Copies the requested per-session value into *val and logs the read;
 * unknown options yield -ENOPROTOOPT with *val untouched.
 */
static int pppol2tp_session_getsockopt(struct sock *sk,
				       struct l2tp_session *session,
				       int optname, int *val)
{
	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		*val = session->recv_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get recv_seq=%d\n", session->name, *val);
		return 0;

	case PPPOL2TP_SO_SENDSEQ:
		*val = session->send_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get send_seq=%d\n", session->name, *val);
		return 0;

	case PPPOL2TP_SO_LNSMODE:
		*val = session->lns_mode;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get lns_mode=%d\n", session->name, *val);
		return 0;

	case PPPOL2TP_SO_DEBUG:
		*val = session->debug;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get debug=%d\n", session->name, *val);
		return 0;

	case PPPOL2TP_SO_REORDERTO:
		/* stored in jiffies, exposed to userspace in milliseconds */
		*val = (int) jiffies_to_msecs(session->reorder_timeout);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get reorder_timeout=%d\n", session->name, *val);
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
/* Main getsockopt() entry point.
 * Does API checks, then calls either the tunnel or session getsockopt
 * handler, according to whether the PPPoX socket is a for a regular session
 * or the special tunnel type.
 *
 * Returns 0 on success or a negative errno.  On success, up to
 * sizeof(int) bytes of the option value and the (possibly clamped)
 * length are copied back to userspace.
 */
static int pppol2tp_getsockopt(struct socket *sock, int level,
			       int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	int val, len;
	int err;
	struct pppol2tp_session *ps;

	if (level != SOL_PPPOL2TP)
		return udp_prot.getsockopt(sk, level, optname, optval, optlen);

	if (get_user(len, (int __user *) optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	err = -ENOTCONN;
	if (sk->sk_user_data == NULL)
		goto end;

	/* Get the session context */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session_id == 0x0000, treat as operation on tunnel */
	ps = l2tp_session_priv(session);
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		err = -EBADF;
		tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
		sock_put(ps->tunnel_sock);
	} else
		err = pppol2tp_session_getsockopt(sk, session, optname, &val);

	/* BUGFIX: bail out if the helper failed.  The old code fell straight
	 * through to the copy-out, which both masked the helper's error with
	 * -EFAULT/0 and leaked the *uninitialized* stack variable 'val' to
	 * userspace for unknown options.
	 */
	if (err)
		goto end_put_sess;

	err = -EFAULT;
	if (put_user(len, (int __user *) optlen))
		goto end_put_sess;

	if (copy_to_user((void __user *) optval, &val, len))
		goto end_put_sess;

	err = 0;

end_put_sess:
	sock_put(sk);
end:
	return err;
}
/*****************************************************************************
* /proc filesystem for debug
* Since the original pppol2tp driver provided /proc/net/pppol2tp for
* L2TPv2, we dump only L2TPv2 tunnels and sessions here.
*****************************************************************************/
static unsigned int pppol2tp_net_id;
#ifdef CONFIG_PROC_FS
/* Per-open iteration state for the /proc/net/pppol2tp seq_file.
 * The (tunnel_idx, session_idx) pair records where the walk will resume;
 * the tunnel/session pointers hold the item to be shown next.
 */
struct pppol2tp_seq_data {
	struct seq_net_private p;
	int tunnel_idx;			/* current tunnel */
	int session_idx;		/* index of session within current tunnel */
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;	/* NULL means get next tunnel */
};
/* Advance pd->tunnel to the next tunnel in this namespace, skipping
 * L2TPv3 tunnels (only v2 is shown in /proc/net/pppol2tp).  Leaves
 * pd->tunnel NULL when the tunnel list is exhausted.
 */
static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
{
	do {
		pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
		pd->tunnel_idx++;
	} while (pd->tunnel && pd->tunnel->version >= 3);
}
/* Advance pd->session within the current tunnel; when the tunnel's
 * sessions are exhausted, reset the session index and step to the
 * next tunnel.
 */
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
	pd->session_idx++;

	if (pd->session != NULL)
		return;

	pd->session_idx = 0;
	pppol2tp_next_tunnel(net, pd);
}
/* seq_file ->start(): return the item to show for this read pass.
 * Position 0 yields SEQ_START_TOKEN so ->show() emits the header;
 * afterwards the walk state in m->private is advanced one step
 * (a session if one is pending, otherwise the next tunnel).
 * Returns NULL to end iteration when both pointers are exhausted.
 */
static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;
	struct net *net;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;
	net = seq_file_net(m);

	if (pd->tunnel == NULL)
		pppol2tp_next_tunnel(net, pd);
	else
		pppol2tp_next_session(net, pd);

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}
/* seq_file ->next(): always returns NULL so that only one item is shown
 * per pass; the real iteration state is advanced in ->start() via the
 * private pppol2tp_seq_data.  The position is bumped to keep seq_file's
 * bookkeeping consistent.
 */
static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}
/* seq_file ->stop(): no resources are held across ->start()/->show()
 * here, so there is nothing to release.
 */
static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}
/* Emit one tunnel line pair to the seq_file: name, whether the UDP
 * socket still points back at the tunnel, the refcount minus our own
 * reference, then debug mask and tx/rx packet/byte/error counters.
 */
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct l2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
		   tunnel->debug,
		   (unsigned long long)tunnel->stats.tx_packets,
		   (unsigned long long)tunnel->stats.tx_bytes,
		   (unsigned long long)tunnel->stats.tx_errors,
		   (unsigned long long)tunnel->stats.rx_packets,
		   (unsigned long long)tunnel->stats.rx_bytes,
		   (unsigned long long)tunnel->stats.rx_errors);
}
/* Emit one session entry to the seq_file: addressing (local ip/port,
 * tunnel/session id pairs), socket state, configuration flags, stats,
 * and the attached PPP interface name if a PPP channel exists.
 */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct l2tp_session *session = v;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct pppox_sock *po = pppox_sk(ps->sock);
	u32 ip = 0;
	u16 port = 0;

	/* Local address/port of the underlying tunnel UDP socket, if any */
	if (tunnel->sock) {
		struct inet_sock *inet = inet_sk(tunnel->sock);
		ip = ntohl(inet->inet_saddr);
		port = ntohs(inet->inet_sport);
	}

	seq_printf(m, "  SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name, ip, port,
		   tunnel->tunnel_id,
		   session->session_id,
		   tunnel->peer_tunnel_id,
		   session->peer_session_id,
		   ps->sock->sk_state,
		   (session == ps->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, "   %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   (unsigned long long)session->stats.tx_packets,
		   (unsigned long long)session->stats.tx_bytes,
		   (unsigned long long)session->stats.tx_errors,
		   (unsigned long long)session->stats.rx_packets,
		   (unsigned long long)session->stats.rx_bytes,
		   (unsigned long long)session->stats.rx_errors);

	if (po)
		seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
}
/* seq_file ->show(): print the header for SEQ_START_TOKEN, otherwise
 * dispatch to the tunnel or session formatter depending on which item
 * the iterator state currently holds.
 */
static int pppol2tp_seq_show(struct seq_file *m, void *v)
{
	struct pppol2tp_seq_data *pd = v;

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
		seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
		seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		seq_puts(m, "  SESSION name, addr/port src-tid/sid "
			 "dest-tid/sid state user-data-ok\n");
		seq_puts(m, "   mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
		seq_puts(m, "   nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		goto out;
	}

	/* Show the tunnel or session context.
	 */
	if (pd->session == NULL)
		pppol2tp_seq_tunnel_show(m, pd->tunnel);
	else
		pppol2tp_seq_session_show(m, pd->session);

out:
	return 0;
}
/* seq_file operations for /proc/net/pppol2tp. */
static const struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};
/* Called when our /proc file is opened. We allocate data for use when
 * iterating our tunnel / session contexts and store it in the private
 * data of the seq_file (zero-initialized by seq_open_net).
 */
static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pppol2tp_seq_ops,
			    sizeof(struct pppol2tp_seq_data));
}
/* File operations for /proc/net/pppol2tp; standard seq_file plumbing. */
static const struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
#endif /* CONFIG_PROC_FS */
/*****************************************************************************
* Network namespace
*****************************************************************************/
/* Per-namespace init: create /proc/net/pppol2tp for this net namespace.
 * Fails with -ENOMEM if the proc entry cannot be created.
 */
static __net_init int pppol2tp_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "pppol2tp", S_IRUGO,
				  &pppol2tp_proc_fops))
		return -ENOMEM;

	return 0;
}
/* Per-namespace teardown: remove the /proc/net/pppol2tp entry. */
static __net_exit void pppol2tp_exit_net(struct net *net)
{
	proc_net_remove(net, "pppol2tp");
}
/* Network-namespace hooks; .id gives us per-net storage keyed by
 * pppol2tp_net_id.
 */
static struct pernet_operations pppol2tp_net_ops = {
	.init = pppol2tp_init_net,
	.exit = pppol2tp_exit_net,
	.id   = &pppol2tp_net_id,
};
/*****************************************************************************
* Init and cleanup
*****************************************************************************/
/* Socket operations for PPPoL2TP sockets.  Unsupported operations are
 * wired to the generic sock_no_* stubs; ioctl goes through pppox which
 * dispatches to pppol2tp_ioctl via pppol2tp_proto below.
 */
static const struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};
/* Registration entry for the PX_PROTO_OL2TP pppox protocol family. */
static const struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};
#ifdef CONFIG_L2TP_V3
/* Netlink callbacks letting the generic L2TP netlink code create and
 * delete PPP pseudowire sessions (L2TPv3 builds only).
 */
static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
	.session_create	= pppol2tp_session_create,
	.session_delete	= pppol2tp_session_delete,
};
#endif /* CONFIG_L2TP_V3 */
/* Module init: register the pernet ops, the socket protocol, the pppox
 * protocol family and (when built with L2TPv3) the netlink ops.  Each
 * registration is unwound in reverse order on failure.
 */
static int __init pppol2tp_init(void)
{
	int err;

	err = register_pernet_device(&pppol2tp_net_ops);
	if (err)
		goto out;

	err = proto_register(&pppol2tp_sk_proto, 0);
	if (err)
		goto out_unregister_pppol2tp_pernet;

	err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
	if (err)
		goto out_unregister_pppol2tp_proto;

#ifdef CONFIG_L2TP_V3
	err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
	if (err)
		goto out_unregister_pppox;
#endif

	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
	       PPPOL2TP_DRV_VERSION);
	return 0;

#ifdef CONFIG_L2TP_V3
out_unregister_pppox:
	unregister_pppox_proto(PX_PROTO_OL2TP);
#endif
out_unregister_pppol2tp_proto:
	proto_unregister(&pppol2tp_sk_proto);
out_unregister_pppol2tp_pernet:
	unregister_pernet_device(&pppol2tp_net_ops);
out:
	return err;
}
/* Module exit: unregister everything in the reverse order of
 * pppol2tp_init().
 */
static void __exit pppol2tp_exit(void)
{
#ifdef CONFIG_L2TP_V3
	l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
#endif
	unregister_pppox_proto(PX_PROTO_OL2TP);
	proto_unregister(&pppol2tp_sk_proto);
	unregister_pernet_device(&pppol2tp_net_ops);
}
module_init(pppol2tp_init);
module_exit(pppol2tp_exit);
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("PPP over L2TP over UDP");
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
| gpl-2.0 |
JohnnySun/RaspberryPi_Lnux_Kernel_JohnnySun_Multiboot | fs/efs/dir.c | 2167 | 2664 | /*
* dir.c
*
* Copyright (c) 1999 Al Smith
*/
#include <linux/buffer_head.h>
#include "efs.h"
static int efs_readdir(struct file *, void *, filldir_t);
/* File operations for EFS directories; reading is handled by efs_readdir. */
const struct file_operations efs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= efs_readdir,
};
/* Inode operations for EFS directories; only lookup is supported (read-only fs). */
const struct inode_operations efs_dir_inode_operations = {
	.lookup		= efs_lookup,
};
/* Read one directory entry per call.
 *
 * f_pos encodes (block << EFS_DIRBSIZE_BITS) | slot.  The directory
 * blocks are scanned from that position for the next non-empty slot;
 * when one is found it is emitted via filldir() and f_pos is advanced
 * to the following slot before returning.
 *
 * Fixes over the previous version:
 *  - the bounds check on the on-disk entry now runs *before* filldir()
 *    reads the name, so a corrupted entry that spills past the block
 *    is skipped instead of being handed to userspace first;
 *  - filldir()'s return value is honoured: if the user buffer is full,
 *    we stop without advancing f_pos so the entry is retried on the
 *    next call rather than silently dropped.
 */
static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
	struct inode *inode = file_inode(filp);
	struct buffer_head *bh;

	struct efs_dir		*dirblock;
	struct efs_dentry	*dirslot;
	efs_ino_t		inodenum;
	efs_block_t		block;
	int			slot, namelen;
	char			*nameptr;

	if (inode->i_size & (EFS_DIRBSIZE-1))
		printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of EFS_DIRBSIZE\n");

	/* work out where this entry can be found */
	block = filp->f_pos >> EFS_DIRBSIZE_BITS;

	/* each block contains at most 256 slots */
	slot  = filp->f_pos & 0xff;

	/* look at all blocks */
	while (block < inode->i_blocks) {
		/* read the dir block */
		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));

		if (!bh) {
			printk(KERN_ERR "EFS: readdir(): failed to read dir block %d\n", block);
			break;
		}

		dirblock = (struct efs_dir *) bh->b_data;

		if (be16_to_cpu(dirblock->magic) != EFS_DIRBLK_MAGIC) {
			printk(KERN_ERR "EFS: readdir(): invalid directory block\n");
			brelse(bh);
			break;
		}

		while (slot < dirblock->slots) {
			if (dirblock->space[slot] == 0) {
				slot++;
				continue;
			}

			dirslot  = (struct efs_dentry *) (((char *) bh->b_data) + EFS_SLOTAT(dirblock, slot));

			inodenum = be32_to_cpu(dirslot->inode);
			namelen  = dirslot->namelen;
			nameptr  = dirslot->name;

#ifdef DEBUG
			printk(KERN_DEBUG "EFS: readdir(): block %d slot %d/%d: inode %u, name \"%s\", namelen %u\n", block, slot, dirblock->slots-1, inodenum, nameptr, namelen);
#endif
			if (namelen > 0) {
				/* sanity check: the entry must lie entirely
				 * within the directory block *before* we let
				 * filldir() read the name */
				if (nameptr - (char *) dirblock + namelen > EFS_DIRBSIZE) {
					printk(KERN_WARNING "EFS: directory entry %d exceeds directory block\n", slot);
					slot++;
					continue;
				}

				/* found the next entry */
				filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;

				/* copy filename and data in dirslot; if the
				 * user buffer is full, stop without advancing
				 * f_pos so the entry is retried next call */
				if (filldir(dirent, nameptr, namelen, filp->f_pos, inodenum, DT_UNKNOWN) < 0) {
					brelse(bh);
					goto out;
				}

				/* store position of next slot */
				if (++slot == dirblock->slots) {
					slot = 0;
					block++;
				}
				brelse(bh);
				filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
				goto out;
			}
			slot++;
		}
		brelse(bh);

		slot = 0;
		block++;
	}

	filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot;
out:
	return 0;
}
| gpl-2.0 |
krieger-od/linux | fs/nls/mac-inuit.c | 2935 | 26996 | /*
* linux/fs/nls/mac-inuit.c
*
* Charset macinuit translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
/*
* COPYRIGHT AND PERMISSION NOTICE
*
* Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
* the Terms of Use in http://www.unicode.org/copyright.html.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of the Unicode data files and any associated documentation (the "Data
* Files") or Unicode software and any associated documentation (the
* "Software") to deal in the Data Files or Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Data Files or Software, and
* to permit persons to whom the Data Files or Software are furnished to do
* so, provided that (a) the above copyright notice(s) and this permission
* notice appear with all copies of the Data Files or Software, (b) both the
* above copyright notice(s) and this permission notice appear in associated
* documentation, and (c) there is clear notice in each modified Data File or
* in the Software as well as in the documentation associated with the Data
* File(s) or Software that the data or software has been modified.
*
* THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
* KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
* THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
* INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THE DATA FILES OR SOFTWARE.
*
* Except as contained in this notice, the name of a copyright holder shall
* not be used in advertising or otherwise to promote the sale, use or other
* dealings in these Data Files or Software without prior written
* authorization of the copyright holder.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
/* Forward table: macinuit byte -> Unicode code point.  Bytes 0x00-0x7f
 * are identity-mapped ASCII; bytes 0x80-0xff map mostly into the
 * U+1400-U+167F range plus a few Latin-1/punctuation code points.
 */
static const wchar_t charset2uni[256] = {
	/* 0x00 */
	0x0000, 0x0001, 0x0002, 0x0003,
	0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b,
	0x000c, 0x000d, 0x000e, 0x000f,
	/* 0x10 */
	0x0010, 0x0011, 0x0012, 0x0013,
	0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b,
	0x001c, 0x001d, 0x001e, 0x001f,
	/* 0x20 */
	0x0020, 0x0021, 0x0022, 0x0023,
	0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b,
	0x002c, 0x002d, 0x002e, 0x002f,
	/* 0x30 */
	0x0030, 0x0031, 0x0032, 0x0033,
	0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b,
	0x003c, 0x003d, 0x003e, 0x003f,
	/* 0x40 */
	0x0040, 0x0041, 0x0042, 0x0043,
	0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b,
	0x004c, 0x004d, 0x004e, 0x004f,
	/* 0x50 */
	0x0050, 0x0051, 0x0052, 0x0053,
	0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b,
	0x005c, 0x005d, 0x005e, 0x005f,
	/* 0x60 */
	0x0060, 0x0061, 0x0062, 0x0063,
	0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b,
	0x006c, 0x006d, 0x006e, 0x006f,
	/* 0x70 */
	0x0070, 0x0071, 0x0072, 0x0073,
	0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b,
	0x007c, 0x007d, 0x007e, 0x007f,
	/* 0x80 */
	0x1403, 0x1404, 0x1405, 0x1406,
	0x140a, 0x140b, 0x1431, 0x1432,
	0x1433, 0x1434, 0x1438, 0x1439,
	0x1449, 0x144e, 0x144f, 0x1450,
	/* 0x90 */
	0x1451, 0x1455, 0x1456, 0x1466,
	0x146d, 0x146e, 0x146f, 0x1470,
	0x1472, 0x1473, 0x1483, 0x148b,
	0x148c, 0x148d, 0x148e, 0x1490,
	/* 0xa0 */
	0x1491, 0x00b0, 0x14a1, 0x14a5,
	0x14a6, 0x2022, 0x00b6, 0x14a7,
	0x00ae, 0x00a9, 0x2122, 0x14a8,
	0x14aa, 0x14ab, 0x14bb, 0x14c2,
	/* 0xb0 */
	0x14c3, 0x14c4, 0x14c5, 0x14c7,
	0x14c8, 0x14d0, 0x14ef, 0x14f0,
	0x14f1, 0x14f2, 0x14f4, 0x14f5,
	0x1505, 0x14d5, 0x14d6, 0x14d7,
	/* 0xc0 */
	0x14d8, 0x14da, 0x14db, 0x14ea,
	0x1528, 0x1529, 0x152a, 0x152b,
	0x152d, 0x2026, 0x00a0, 0x152e,
	0x153e, 0x1555, 0x1556, 0x1557,
	/* 0xd0 */
	0x2013, 0x2014, 0x201c, 0x201d,
	0x2018, 0x2019, 0x1558, 0x1559,
	0x155a, 0x155d, 0x1546, 0x1547,
	0x1548, 0x1549, 0x154b, 0x154c,
	/* 0xe0 */
	0x1550, 0x157f, 0x1580, 0x1581,
	0x1582, 0x1583, 0x1584, 0x1585,
	0x158f, 0x1590, 0x1591, 0x1592,
	0x1593, 0x1594, 0x1595, 0x1671,
	/* 0xf0 */
	0x1672, 0x1673, 0x1674, 0x1675,
	0x1676, 0x1596, 0x15a0, 0x15a1,
	0x15a2, 0x15a3, 0x15a4, 0x15a5,
	0x15a6, 0x157c, 0x0141, 0x0142,
};
/* Reverse table, Unicode page 0x00xx -> macinuit byte.
 * 0x00 means "no mapping" (except for U+0000 itself).
 */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xca, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0xa9, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
	0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/* Reverse table, Unicode page 0x01xx -> macinuit byte.
 * Only U+0141/U+0142 (L/l with stroke) are mapped.
 */
static const unsigned char page01[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/* Reverse table, Unicode page 0x14xx -> macinuit byte (0x00 = unmapped). */
static const unsigned char page14[256] = {
	0x00, 0x00, 0x00, 0x80, 0x81, 0x82, 0x83, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x84, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x00, 0x86, 0x87, 0x88, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x8a, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x8e, /* 0x48-0x4f */
	0x8f, 0x90, 0x00, 0x00, 0x00, 0x91, 0x92, 0x00, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x95, 0x96, /* 0x68-0x6f */
	0x97, 0x00, 0x98, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x88-0x8f */
	0x9f, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0x00, 0xa2, 0x00, 0x00, 0x00, 0xa3, 0xa4, 0xa7, /* 0xa0-0xa7 */
	0xab, 0x00, 0xac, 0xad, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0xaf, 0xb0, 0xb1, 0xb2, 0x00, 0xb3, /* 0xc0-0xc7 */
	0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0xb5, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xbe, 0xbf, /* 0xd0-0xd7 */
	0xc0, 0x00, 0xc1, 0xc2, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
	0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0xb6, /* 0xe8-0xef */
	0xb7, 0xb8, 0xb9, 0x00, 0xba, 0xbb, 0x00, 0x00, /* 0xf0-0xf7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/* Reverse table, Unicode page 0x15xx -> macinuit byte (0x00 = unmapped). */
static const unsigned char page15[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0xc4, 0xc5, 0xc6, 0xc7, 0x00, 0xc8, 0xcb, 0x00, /* 0x28-0x2f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x40-0x47 */
	0xdc, 0xdd, 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0xe0, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xce, 0xcf, /* 0x50-0x57 */
	0xd6, 0xd7, 0xd8, 0x00, 0x00, 0xd9, 0x00, 0x00, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0xe1, /* 0x78-0x7f */
	0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, /* 0x88-0x8f */
	0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xf5, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/* Reverse table, Unicode page 0x16xx -> macinuit byte.
 * Only U+1671-U+1676 are mapped (bytes 0xef-0xf4).
 */
static const unsigned char page16[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Unicode page U+20xx (general punctuation / currency) -> charset byte.
 * An entry of 0x00 means the code point has no mapping in this charset.
 * Non-zero entries: U+2013/U+2014 (dashes), U+2018..U+201E (quotes),
 * U+2022 (bullet), U+2026 (ellipsis) — per the table comments below.
 */
static const unsigned char page20[256] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
    0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
    0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
    0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Unicode page U+21xx (letterlike symbols) -> charset byte.
 * Only U+2122 (TRADE MARK SIGN -> 0xaa) is mapped; 0x00 means no mapping.
 */
static const unsigned char page21[256] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
    0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Top-level Unicode -> charset dispatch: indexed by the high byte of the
 * code point, each non-NULL slot points at a 256-entry page table for the
 * low byte. NULL means no code point in that page is representable.
 * Pages present: 0x00, 0x01, 0x14-0x16 (defined earlier in this file),
 * 0x20, 0x21 (above).
 */
static const unsigned char *const page_uni2charset[256] = {
    page00, page01, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   page14, page15, page16, NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    page20, page21, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
    NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
};
/*
 * Per-byte lowercase map used by case-insensitive filesystem lookups.
 * NOTE(review): every entry is 0xff. In the NLS convention each slot holds
 * the lowercase equivalent of its index (0x00 = no conversion); a uniform
 * 0xff table looks like generator placeholder output — verify against the
 * charset definition before relying on case folding.
 */
static const unsigned char charset2lower[256] = {
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
};
/*
 * Per-byte uppercase map, companion to charset2lower above.
 * NOTE(review): uniformly 0xfe — same placeholder concern as charset2lower;
 * verify before relying on uppercase conversion for this charset.
 */
static const unsigned char charset2upper[256] = {
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
    0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
};
/*
 * Convert one Unicode code point to a single charset byte.
 * Returns 1 (bytes written) on success, -EINVAL when the code point has no
 * mapping, -ENAMETOOLONG when the output buffer has no room.
 */
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
    const unsigned char *row = page_uni2charset[(uni >> 8) & 0xff];
    unsigned char b;

    if (boundlen <= 0)
        return -ENAMETOOLONG;
    /* A missing page or a 0x00 table entry both mean "unmappable". */
    b = row ? row[uni & 0x00ff] : 0;
    if (!b)
        return -EINVAL;
    *out = b;
    return 1;
}
/*
 * Convert the first byte of rawstring to a Unicode code point.
 * Always stores the looked-up value in *uni; returns 1 on success or
 * -EINVAL when the byte has no mapping (table entry 0x0000).
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
    wchar_t code = charset2uni[rawstring[0]];

    *uni = code;
    return code ? 1 : -EINVAL;
}
/*
 * NLS table registered with the kernel; filesystems obtain it via
 * load_nls("macinuit"). The conversion hooks are defined above.
 */
static struct nls_table table = {
    .charset = "macinuit",
    .uni2char = uni2char,
    .char2uni = char2uni,
    .charset2lower = charset2lower,
    .charset2upper = charset2upper,
};
/* Module init: register this charset table with the NLS core. */
static int __init init_nls_macinuit(void)
{
    return register_nls(&table);
}
/* Module exit: remove the charset table from the NLS core. */
static void __exit exit_nls_macinuit(void)
{
    unregister_nls(&table);
}
module_init(init_nls_macinuit)
module_exit(exit_nls_macinuit)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
linux4hach/linux-at91 | arch/mips/loongson/common/cmdline.c | 3447 | 1282 | /*
* Based on Ocelot Linux port, which is
* Copyright 2001 MontaVista Software Inc.
* Author: jsun@mvista.com or jsun@junsun.net
*
* Copyright 2003 ICT CAS
* Author: Michael Guo <guoyi@ict.ac.cn>
*
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/bootinfo.h>
#include <loongson.h>
/*
 * Build the kernel command line (arcs_cmdline) from the argv array handed
 * over by the PMON firmware. Called once during early boot.
 */
void __init prom_init_cmdline(void)
{
    int prom_argc;
    /* pmon passes arguments in 32bit pointers */
    int *_prom_argv;
    int i;
    long l;
    /* firmware arguments are initialized in head.S */
    prom_argc = fw_arg0;
    _prom_argv = (int *)fw_arg1;
    /* arg[0] is "g", the rest is boot parameters */
    arcs_cmdline[0] = '\0';
    for (i = 1; i < prom_argc; i++) {
        /* widen the 32-bit firmware pointer to a native pointer */
        l = (long)_prom_argv[i];
        /*
         * Stop once the next argument would overflow arcs_cmdline.
         * NOTE(review): the length term is strlen(arg) - 1 (the "+ 1"
         * skips the first character), which does not account for the
         * appended space/NUL — confirm the bound is intentional.
         */
        if (strlen(arcs_cmdline) + strlen(((char *)l) + 1)
            >= sizeof(arcs_cmdline))
            break;
        strcat(arcs_cmdline, ((char *)l));
        strcat(arcs_cmdline, " ");
    }
    /* derive the machine type from the assembled command line */
    prom_init_machtype();
}
| gpl-2.0 |
kennysgithub/sm-p607t-kernel | sound/soc/imx/imx-ssi.c | 3959 | 16986 | /*
* imx-ssi.c -- ALSA Soc Audio Layer
*
* Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
*
* This code is based on code copyrighted by Freescale,
* Liam Girdwood, Javier Martin and probably others.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
* The i.MX SSI core has some nasty limitations in AC97 mode. While most
* sane processor vendors have a FIFO per AC97 slot, the i.MX has only
* one FIFO which combines all valid receive slots. We cannot even select
* which slots we want to receive. The WM9712 with which this driver
* was developed with always sends GPIO status data in slot 12 which
* we receive in our (PCM-) data stream. The only chance we have is to
* manually skip this data in the FIQ handler. With sampling rates different
* from 48000Hz not every frame has valid receive data, so the ratio
* between pcm data and GPIO status data changes. Our FIQ handler is not
* able to handle this, hence this driver only works with 48000Hz sampling
* rate.
* Reading and writing AC97 registers is another challenge. The core
* provides us status bits when the read register is updated with *another*
* value. When we read the same register two times (and the register still
* contains the same value) these status bits are not set. We work
* around this by not polling these bits but only wait a fixed delay.
*
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <mach/ssi.h>
#include <mach/hardware.h>
#include "imx-ssi.h"
#define SSI_SACNT_DEFAULT (SSI_SACNT_AC97EN | SSI_SACNT_FV)
/*
 * Configure TDM/network mode: frame length (slot count) and the per-slot
 * tx/rx active masks. Must only be called while the port is inactive
 * (SSIEN = 0). slot_width is accepted but not used by this controller.
 */
static int imx_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
    unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    u32 val;

    /* The DC (frame rate divider) field holds slots - 1. */
    val = readl(ssi->base + SSI_STCCR);
    val = (val & ~SSI_STCCR_DC_MASK) | SSI_STCCR_DC(slots - 1);
    writel(val, ssi->base + SSI_STCCR);

    val = readl(ssi->base + SSI_SRCCR);
    val = (val & ~SSI_STCCR_DC_MASK) | SSI_STCCR_DC(slots - 1);
    writel(val, ssi->base + SSI_SRCCR);

    /* Per-direction active-slot masks. */
    writel(tx_mask, ssi->base + SSI_STMSK);
    writel(rx_mask, ssi->base + SSI_SRMSK);

    return 0;
}
/*
 * SSI DAI format configuration.
 * Should only be called when port is inactive (i.e. SSIEN = 0).
 *
 * Translates the ASoC format flags into STCR bits; the same value is
 * written to both the transmit (STCR) and receive (SRCR) control
 * registers. Only codec-as-master clocking is supported.
 */
static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    u32 strcr = 0, scr;
    scr = readl(ssi->base + SSI_SCR) & ~(SSI_SCR_SYN | SSI_SCR_NET);
    /* DAI mode */
    switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
    case SND_SOC_DAIFMT_I2S:
        /* data on rising edge of bclk, frame low 1clk before data */
        strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0;
        scr |= SSI_SCR_NET;
        if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) {
            /* use the SSI's built-in I2S slave mode */
            scr &= ~SSI_I2S_MODE_MASK;
            scr |= SSI_SCR_I2S_MODE_SLAVE;
        }
        break;
    case SND_SOC_DAIFMT_LEFT_J:
        /* data on rising edge of bclk, frame high with data */
        strcr |= SSI_STCR_TXBIT0;
        break;
    case SND_SOC_DAIFMT_DSP_B:
        /* data on rising edge of bclk, frame high with data */
        strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0;
        break;
    case SND_SOC_DAIFMT_DSP_A:
        /* data on rising edge of bclk, frame high 1clk before data */
        strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
        break;
    }
    /* DAI clock inversion */
    switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
    case SND_SOC_DAIFMT_IB_IF:
        strcr |= SSI_STCR_TFSI;
        strcr &= ~SSI_STCR_TSCKP;
        break;
    case SND_SOC_DAIFMT_IB_NF:
        strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI);
        break;
    case SND_SOC_DAIFMT_NB_IF:
        strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP;
        break;
    case SND_SOC_DAIFMT_NB_NF:
        strcr &= ~SSI_STCR_TFSI;
        strcr |= SSI_STCR_TSCKP;
        break;
    }
    /* DAI clock master masks */
    switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
    case SND_SOC_DAIFMT_CBM_CFM:
        break;
    default:
        /* Master mode not implemented, needs handling of clocks. */
        return -EINVAL;
    }
    /* always enable transmit FIFO 0 */
    strcr |= SSI_STCR_TFEN0;
    if (ssi->flags & IMX_SSI_NET)
        scr |= SSI_SCR_NET;
    if (ssi->flags & IMX_SSI_SYN)
        scr |= SSI_SCR_SYN;
    writel(strcr, ssi->base + SSI_STCR);
    writel(strcr, ssi->base + SSI_SRCR);
    writel(scr, ssi->base + SSI_SCR);
    return 0;
}
/*
 * Enable or disable the SSI system clock output.
 * Should only be called when the port is inactive (SSIEN = 0);
 * only clk_id IMX_SSP_SYS_CLK is supported.
 */
static int imx_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
    int clk_id, unsigned int freq, int dir)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    u32 ctrl = readl(ssi->base + SSI_SCR);

    if (clk_id != IMX_SSP_SYS_CLK)
        return -EINVAL;

    if (dir == SND_SOC_CLOCK_OUT)
        ctrl |= SSI_SCR_SYS_CLK_EN;
    else
        ctrl &= ~SSI_SCR_SYS_CLK_EN;

    writel(ctrl, ssi->base + SSI_SCR);
    return 0;
}
/*
 * SSI clock dividers (DIV2, PSR prescaler, PM prescale modulus).
 * Should only be called when the port is inactive (i.e. SSIEN = 0).
 *
 * Fix: the RX cases previously modified 'stccr', so receive divider
 * settings were applied to the transmit register while SRCCR was read
 * and written back unchanged. The RX cases now update 'srccr'.
 * (The SSI_STCCR_* field macros are reused for SRCCR; both registers
 * share the same bit layout.)
 */
static int imx_ssi_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
    int div_id, int div)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    u32 stccr, srccr;
    stccr = readl(ssi->base + SSI_STCCR);
    srccr = readl(ssi->base + SSI_SRCCR);
    switch (div_id) {
    case IMX_SSI_TX_DIV_2:
        stccr &= ~SSI_STCCR_DIV2;
        stccr |= div;
        break;
    case IMX_SSI_TX_DIV_PSR:
        stccr &= ~SSI_STCCR_PSR;
        stccr |= div;
        break;
    case IMX_SSI_TX_DIV_PM:
        stccr &= ~0xff;
        stccr |= SSI_STCCR_PM(div);
        break;
    case IMX_SSI_RX_DIV_2:
        srccr &= ~SSI_STCCR_DIV2;
        srccr |= div;
        break;
    case IMX_SSI_RX_DIV_PSR:
        srccr &= ~SSI_STCCR_PSR;
        srccr |= div;
        break;
    case IMX_SSI_RX_DIV_PM:
        srccr &= ~0xff;
        srccr |= SSI_STCCR_PM(div);
        break;
    default:
        return -EINVAL;
    }
    writel(stccr, ssi->base + SSI_STCCR);
    writel(srccr, ssi->base + SSI_SRCCR);
    return 0;
}
/*
 * Stream open: attach the direction-specific DMA parameters to the DAI so
 * the platform (PCM) driver can pick them up.
 */
static int imx_ssi_startup(struct snd_pcm_substream *substream,
    struct snd_soc_dai *cpu_dai)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    int playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;

    snd_soc_dai_set_dma_data(cpu_dai, substream,
        playback ? &ssi->dma_params_tx : &ssi->dma_params_rx);
    return 0;
}
/*
 * Program the sample word length for the active direction.
 * Should only be called when the port is inactive (SSIEN = 0), although it
 * can be called multiple times by upper layers. In synchronous mode the
 * STCCR register configures both directions.
 */
static int imx_ssi_hw_params(struct snd_pcm_substream *substream,
    struct snd_pcm_hw_params *params,
    struct snd_soc_dai *cpu_dai)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    u32 reg, val;

    if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
        (ssi->flags & IMX_SSI_SYN))
        reg = SSI_STCCR;
    else
        reg = SSI_SRCCR;

    val = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK;

    /* sample word (data) size */
    switch (params_format(params)) {
    case SNDRV_PCM_FORMAT_S16_LE:
        val |= SSI_SRCCR_WL(16);
        break;
    case SNDRV_PCM_FORMAT_S20_3LE:
        val |= SSI_SRCCR_WL(20);
        break;
    case SNDRV_PCM_FORMAT_S24_LE:
        val |= SSI_SRCCR_WL(24);
        break;
    }

    writel(val, ssi->base + reg);
    return 0;
}
/*
 * Start/stop a stream: enables the direction (TE/RE) and the matching
 * DMA or interrupt sources. ssi->enabled reference-counts the two
 * directions so SSIEN is set on the first start and cleared on the last
 * stop. In AC97 mode SCR is never touched here because tx/rx must stay
 * enabled for codec register access.
 */
static int imx_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
    struct snd_soc_dai *dai)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(dai);
    unsigned int sier_bits, sier;
    unsigned int scr;
    scr = readl(ssi->base + SSI_SCR);
    sier = readl(ssi->base + SSI_SIER);
    /* pick DMA-request or interrupt enables for this direction */
    if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
        if (ssi->flags & IMX_SSI_DMA)
            sier_bits = SSI_SIER_TDMAE;
        else
            sier_bits = SSI_SIER_TIE | SSI_SIER_TFE0_EN;
    } else {
        if (ssi->flags & IMX_SSI_DMA)
            sier_bits = SSI_SIER_RDMAE;
        else
            sier_bits = SSI_SIER_RIE | SSI_SIER_RFF0_EN;
    }
    switch (cmd) {
    case SNDRV_PCM_TRIGGER_START:
    case SNDRV_PCM_TRIGGER_RESUME:
    case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
            scr |= SSI_SCR_TE;
        else
            scr |= SSI_SCR_RE;
        sier |= sier_bits;
        /* first active direction powers the whole SSI on */
        if (++ssi->enabled == 1)
            scr |= SSI_SCR_SSIEN;
        break;
    case SNDRV_PCM_TRIGGER_STOP:
    case SNDRV_PCM_TRIGGER_SUSPEND:
    case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
            scr &= ~SSI_SCR_TE;
        else
            scr &= ~SSI_SCR_RE;
        sier &= ~sier_bits;
        /* last active direction powers the SSI off */
        if (--ssi->enabled == 0)
            scr &= ~SSI_SCR_SSIEN;
        break;
    default:
        return -EINVAL;
    }
    if (!(ssi->flags & IMX_SSI_USE_AC97))
        /* rx/tx are always enabled to access ac97 registers */
        writel(scr, ssi->base + SSI_SCR);
    writel(sier, ssi->base + SSI_SIER);
    return 0;
}
/* DAI callbacks shared by the plain SSI and AC97 DAI drivers below. */
static const struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = {
    .startup = imx_ssi_startup,
    .hw_params = imx_ssi_hw_params,
    .set_fmt = imx_ssi_set_dai_fmt,
    .set_clkdiv = imx_ssi_set_dai_clkdiv,
    .set_sysclk = imx_ssi_set_dai_sysclk,
    .set_tdm_slot = imx_ssi_set_dai_tdm_slot,
    .trigger = imx_ssi_trigger,
};
/*
 * DAI probe: propagate the driver state to the DAI and program the FIFO 0
 * watermarks from the configured DMA burst sizes.
 */
static int imx_ssi_dai_probe(struct snd_soc_dai *dai)
{
    struct imx_ssi *ssi = dev_get_drvdata(dai->dev);

    snd_soc_dai_set_drvdata(dai, ssi);

    writel(SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) |
           SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize),
           ssi->base + SSI_SFCSR);
    return 0;
}
/* Generic I2S/TDM DAI: mono or stereo, 8-96 kHz, S16_LE only. */
static struct snd_soc_dai_driver imx_ssi_dai = {
    .probe = imx_ssi_dai_probe,
    .playback = {
        .channels_min = 1,
        .channels_max = 2,
        .rates = SNDRV_PCM_RATE_8000_96000,
        .formats = SNDRV_PCM_FMTBIT_S16_LE,
    },
    .capture = {
        .channels_min = 1,
        .channels_max = 2,
        .rates = SNDRV_PCM_RATE_8000_96000,
        .formats = SNDRV_PCM_FMTBIT_S16_LE,
    },
    .ops = &imx_ssi_pcm_dai_ops,
};
/*
 * AC97 DAI: stereo, fixed at 48 kHz because the FIQ handler cannot cope
 * with the variable data/status slot ratio at other rates (see the
 * limitation notes at the top of this file).
 */
static struct snd_soc_dai_driver imx_ac97_dai = {
    .probe = imx_ssi_dai_probe,
    .ac97_control = 1,
    .playback = {
        .stream_name = "AC97 Playback",
        .channels_min = 2,
        .channels_max = 2,
        .rates = SNDRV_PCM_RATE_48000,
        .formats = SNDRV_PCM_FMTBIT_S16_LE,
    },
    .capture = {
        .stream_name = "AC97 Capture",
        .channels_min = 2,
        .channels_max = 2,
        .rates = SNDRV_PCM_RATE_48000,
        .formats = SNDRV_PCM_FMTBIT_S16_LE,
    },
    .ops = &imx_ssi_pcm_dai_ops,
};
/*
 * One-time hardware setup for AC97 mode: synchronous network mode,
 * 13-slot frames of 16-bit words, FIFO watermarks, and the AC97 control
 * register. The write sequence follows the hardware's bring-up order
 * (configure while disabled, then enable, then tx/rx) — do not reorder.
 */
static void setup_channel_to_ac97(struct imx_ssi *imx_ssi)
{
    void __iomem *base = imx_ssi->base;
    /* start from a disabled, clean state */
    writel(0x0, base + SSI_SCR);
    writel(0x0, base + SSI_STCR);
    writel(0x0, base + SSI_SRCR);
    writel(SSI_SCR_SYN | SSI_SCR_NET, base + SSI_SCR);
    writel(SSI_SFCSR_RFWM0(8) |
        SSI_SFCSR_TFWM0(8) |
        SSI_SFCSR_RFWM1(8) |
        SSI_SFCSR_TFWM1(8), base + SSI_SFCSR);
    /* 16-bit words, 13 slots per frame (AC97 tag + 12 data slots) */
    writel(SSI_STCCR_WL(16) | SSI_STCCR_DC(12), base + SSI_STCCR);
    writel(SSI_STCCR_WL(16) | SSI_STCCR_DC(12), base + SSI_SRCCR);
    writel(SSI_SCR_SYN | SSI_SCR_NET | SSI_SCR_SSIEN, base + SSI_SCR);
    writel(SSI_SOR_WAIT(3), base + SSI_SOR);
    /* tx/rx stay enabled permanently so codec registers are reachable */
    writel(SSI_SCR_SYN | SSI_SCR_NET | SSI_SCR_SSIEN |
            SSI_SCR_TE | SSI_SCR_RE,
            base + SSI_SOR == NULL ? base + SSI_SCR : base + SSI_SCR);
    writel(SSI_SACNT_DEFAULT, base + SSI_SACNT);
    writel(0xff, base + SSI_SACCDIS);
    writel(0x300, base + SSI_SACCEN);
}
/* The single SSI instance driving the AC97 link (at most one allowed). */
static struct imx_ssi *ac97_ssi;
/*
 * Write an AC97 codec register over the SSI's AC97 command slots.
 * The SSI provides no reliable completion status (see the comment at the
 * top of the file), so a fixed 100us delay is used instead of polling.
 */
static void imx_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
    unsigned short val)
{
    struct imx_ssi *imx_ssi = ac97_ssi;
    void __iomem *base = imx_ssi->base;
    unsigned int lreg;
    unsigned int lval;
    /* AC97 register indices are 7 bits */
    if (reg > 0x7f)
        return;
    pr_debug("%s: 0x%02x 0x%04x\n", __func__, reg, val);
    /* register index occupies the upper bits of the command address slot */
    lreg = reg << 12;
    writel(lreg, base + SSI_SACADD);
    /* data is left-justified in the 20-bit command data slot */
    lval = val << 4;
    writel(lval , base + SSI_SACDAT);
    writel(SSI_SACNT_DEFAULT | SSI_SACNT_WR, base + SSI_SACNT);
    udelay(100);
}
/*
 * Read an AC97 codec register. As with writes, there is no trustworthy
 * ready bit, so the result is sampled after a fixed 100us delay.
 */
static unsigned short imx_ssi_ac97_read(struct snd_ac97 *ac97,
    unsigned short reg)
{
    struct imx_ssi *imx_ssi = ac97_ssi;
    void __iomem *base = imx_ssi->base;
    unsigned short val = -1;
    unsigned int lreg;
    /* place the 7-bit register index in the command address slot */
    lreg = (reg & 0x7f) << 12 ;
    writel(lreg, base + SSI_SACADD);
    writel(SSI_SACNT_DEFAULT | SSI_SACNT_RD, base + SSI_SACNT);
    udelay(100);
    /* data comes back left-justified in the 20-bit status data slot */
    val = (readl(base + SSI_SACDAT) >> 4) & 0xffff;
    pr_debug("%s: 0x%02x 0x%04x\n", __func__, reg, val);
    return val;
}
/* Cold-reset the AC97 codec through the board-supplied callback, if set. */
static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)
{
    struct imx_ssi *ssi = ac97_ssi;

    if (ssi->ac97_reset)
        ssi->ac97_reset(ac97);
}
/* Warm-reset the AC97 codec through the board-supplied callback, if set. */
static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
{
    struct imx_ssi *ssi = ac97_ssi;

    if (ssi->ac97_warm_reset)
        ssi->ac97_warm_reset(ac97);
}
/*
 * AC97 bus operations consumed by the generic ASoC AC97 layer; exported
 * because codec drivers link against this well-known symbol name.
 */
struct snd_ac97_bus_ops soc_ac97_ops = {
    .read = imx_ssi_ac97_read,
    .write = imx_ssi_ac97_write,
    .reset = imx_ssi_ac97_reset,
    .warm_reset = imx_ssi_ac97_warm_reset
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
/*
 * Platform probe: claim clock and MMIO, configure AC97 mode if requested,
 * register the DAI and spawn the FIQ- and DMA-based PCM platform devices.
 * Unwinds with the usual goto-cleanup ladder on failure.
 *
 * Fix: the MEM resource pointer was previously overwritten by the optional
 * DMA resource lookups, so the error-unwind path could call
 * release_mem_region() on the wrong (possibly NULL) resource. The DMA
 * lookups now use a dedicated 'dma_res' variable.
 */
static int imx_ssi_probe(struct platform_device *pdev)
{
    struct resource *res, *dma_res;
    struct imx_ssi *ssi;
    struct imx_ssi_platform_data *pdata = pdev->dev.platform_data;
    int ret = 0;
    struct snd_soc_dai_driver *dai;
    ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
    if (!ssi)
        return -ENOMEM;
    dev_set_drvdata(&pdev->dev, ssi);
    if (pdata) {
        ssi->ac97_reset = pdata->ac97_reset;
        ssi->ac97_warm_reset = pdata->ac97_warm_reset;
        ssi->flags = pdata->flags;
    }
    ssi->irq = platform_get_irq(pdev, 0);
    ssi->clk = clk_get(&pdev->dev, NULL);
    if (IS_ERR(ssi->clk)) {
        ret = PTR_ERR(ssi->clk);
        dev_err(&pdev->dev, "Cannot get the clock: %d\n",
            ret);
        goto failed_clk;
    }
    clk_enable(ssi->clk);
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        ret = -ENODEV;
        goto failed_get_resource;
    }
    if (!request_mem_region(res->start, resource_size(res), DRV_NAME)) {
        dev_err(&pdev->dev, "request_mem_region failed\n");
        ret = -EBUSY;
        goto failed_get_resource;
    }
    ssi->base = ioremap(res->start, resource_size(res));
    if (!ssi->base) {
        dev_err(&pdev->dev, "ioremap failed\n");
        ret = -ENODEV;
        goto failed_ioremap;
    }
    if (ssi->flags & IMX_SSI_USE_AC97) {
        /* only one SSI may own the AC97 link */
        if (ac97_ssi) {
            ret = -EBUSY;
            goto failed_ac97;
        }
        ac97_ssi = ssi;
        setup_channel_to_ac97(ssi);
        dai = &imx_ac97_dai;
    } else
        dai = &imx_ssi_dai;
    /* mask all interrupts until a stream is triggered */
    writel(0x0, ssi->base + SSI_SIER);
    ssi->dma_params_rx.dma_addr = res->start + SSI_SRX0;
    ssi->dma_params_tx.dma_addr = res->start + SSI_STX0;
    ssi->dma_params_tx.burstsize = 6;
    ssi->dma_params_rx.burstsize = 4;
    /* DMA channels are optional; keep 'res' pointing at the MEM region */
    dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx0");
    if (dma_res)
        ssi->dma_params_tx.dma = dma_res->start;
    dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx0");
    if (dma_res)
        ssi->dma_params_rx.dma = dma_res->start;
    platform_set_drvdata(pdev, ssi);
    ret = snd_soc_register_dai(&pdev->dev, dai);
    if (ret) {
        dev_err(&pdev->dev, "register DAI failed\n");
        goto failed_register;
    }
    ssi->soc_platform_pdev_fiq = platform_device_alloc("imx-fiq-pcm-audio", pdev->id);
    if (!ssi->soc_platform_pdev_fiq) {
        ret = -ENOMEM;
        goto failed_pdev_fiq_alloc;
    }
    platform_set_drvdata(ssi->soc_platform_pdev_fiq, ssi);
    ret = platform_device_add(ssi->soc_platform_pdev_fiq);
    if (ret) {
        dev_err(&pdev->dev, "failed to add platform device\n");
        goto failed_pdev_fiq_add;
    }
    ssi->soc_platform_pdev = platform_device_alloc("imx-pcm-audio", pdev->id);
    if (!ssi->soc_platform_pdev) {
        ret = -ENOMEM;
        goto failed_pdev_alloc;
    }
    platform_set_drvdata(ssi->soc_platform_pdev, ssi);
    ret = platform_device_add(ssi->soc_platform_pdev);
    if (ret) {
        dev_err(&pdev->dev, "failed to add platform device\n");
        goto failed_pdev_add;
    }
    return 0;
failed_pdev_add:
    platform_device_put(ssi->soc_platform_pdev);
failed_pdev_alloc:
    platform_device_del(ssi->soc_platform_pdev_fiq);
failed_pdev_fiq_add:
    platform_device_put(ssi->soc_platform_pdev_fiq);
failed_pdev_fiq_alloc:
    snd_soc_unregister_dai(&pdev->dev);
failed_register:
failed_ac97:
    iounmap(ssi->base);
failed_ioremap:
    release_mem_region(res->start, resource_size(res));
failed_get_resource:
    clk_disable(ssi->clk);
    clk_put(ssi->clk);
failed_clk:
    kfree(ssi);
    return ret;
}
/* Platform remove: tear everything down in reverse order of probe. */
static int __devexit imx_ssi_remove(struct platform_device *pdev)
{
    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    struct imx_ssi *ssi = platform_get_drvdata(pdev);
    platform_device_unregister(ssi->soc_platform_pdev);
    platform_device_unregister(ssi->soc_platform_pdev_fiq);
    snd_soc_unregister_dai(&pdev->dev);
    /* release the AC97 link so a later probe can claim it again */
    if (ssi->flags & IMX_SSI_USE_AC97)
        ac97_ssi = NULL;
    iounmap(ssi->base);
    release_mem_region(res->start, resource_size(res));
    clk_disable(ssi->clk);
    clk_put(ssi->clk);
    kfree(ssi);
    return 0;
}
/* Platform driver glue; bound by name to the "imx-ssi" platform device. */
static struct platform_driver imx_ssi_driver = {
    .probe = imx_ssi_probe,
    .remove = __devexit_p(imx_ssi_remove),
    .driver = {
        .name = "imx-ssi",
        .owner = THIS_MODULE,
    },
};
module_platform_driver(imx_ssi_driver);
/* Module information */
MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-ssi");
| gpl-2.0 |
cm-mirror/android_kernel_xiaomi_dior | sound/soc/imx/imx-ssi.c | 3959 | 16986 | /*
* imx-ssi.c -- ALSA Soc Audio Layer
*
* Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
*
* This code is based on code copyrighted by Freescale,
* Liam Girdwood, Javier Martin and probably others.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
* The i.MX SSI core has some nasty limitations in AC97 mode. While most
* sane processor vendors have a FIFO per AC97 slot, the i.MX has only
* one FIFO which combines all valid receive slots. We cannot even select
* which slots we want to receive. The WM9712 with which this driver
* was developed with always sends GPIO status data in slot 12 which
* we receive in our (PCM-) data stream. The only chance we have is to
* manually skip this data in the FIQ handler. With sampling rates different
* from 48000Hz not every frame has valid receive data, so the ratio
* between pcm data and GPIO status data changes. Our FIQ handler is not
* able to handle this, hence this driver only works with 48000Hz sampling
* rate.
* Reading and writing AC97 registers is another challenge. The core
* provides us status bits when the read register is updated with *another*
* value. When we read the same register two times (and the register still
* contains the same value) these status bits are not set. We work
* around this by not polling these bits but only wait a fixed delay.
*
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <mach/ssi.h>
#include <mach/hardware.h>
#include "imx-ssi.h"
#define SSI_SACNT_DEFAULT (SSI_SACNT_AC97EN | SSI_SACNT_FV)
/*
 * SSI Network Mode or TDM slots configuration.
 * Should only be called when port is inactive (i.e. SSIEN = 0).
 *
 * Programs the frame-rate divider (DC = slots - 1) for both directions
 * and the per-slot tx/rx active masks. slot_width is not used.
 */
static int imx_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
    unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    u32 sccr;
    sccr = readl(ssi->base + SSI_STCCR);
    sccr &= ~SSI_STCCR_DC_MASK;
    sccr |= SSI_STCCR_DC(slots - 1);
    writel(sccr, ssi->base + SSI_STCCR);
    /* the STCCR field macros are reused; SRCCR shares the bit layout */
    sccr = readl(ssi->base + SSI_SRCCR);
    sccr &= ~SSI_STCCR_DC_MASK;
    sccr |= SSI_STCCR_DC(slots - 1);
    writel(sccr, ssi->base + SSI_SRCCR);
    writel(tx_mask, ssi->base + SSI_STMSK);
    writel(rx_mask, ssi->base + SSI_SRMSK);
    return 0;
}
/*
 * SSI DAI format configuration.
 * Should only be called when port is inactive (i.e. SSIEN = 0).
 *
 * Translates the ASoC format flags into STCR bits; the same value is
 * written to both the transmit (STCR) and receive (SRCR) control
 * registers. Only codec-as-master clocking is supported.
 */
static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
{
    struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
    u32 strcr = 0, scr;
    scr = readl(ssi->base + SSI_SCR) & ~(SSI_SCR_SYN | SSI_SCR_NET);
    /* DAI mode */
    switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
    case SND_SOC_DAIFMT_I2S:
        /* data on rising edge of bclk, frame low 1clk before data */
        strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0;
        scr |= SSI_SCR_NET;
        if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) {
            /* use the SSI's built-in I2S slave mode */
            scr &= ~SSI_I2S_MODE_MASK;
            scr |= SSI_SCR_I2S_MODE_SLAVE;
        }
        break;
    case SND_SOC_DAIFMT_LEFT_J:
        /* data on rising edge of bclk, frame high with data */
        strcr |= SSI_STCR_TXBIT0;
        break;
    case SND_SOC_DAIFMT_DSP_B:
        /* data on rising edge of bclk, frame high with data */
        strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0;
        break;
    case SND_SOC_DAIFMT_DSP_A:
        /* data on rising edge of bclk, frame high 1clk before data */
        strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
        break;
    }
    /* DAI clock inversion */
    switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
    case SND_SOC_DAIFMT_IB_IF:
        strcr |= SSI_STCR_TFSI;
        strcr &= ~SSI_STCR_TSCKP;
        break;
    case SND_SOC_DAIFMT_IB_NF:
        strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI);
        break;
    case SND_SOC_DAIFMT_NB_IF:
        strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP;
        break;
    case SND_SOC_DAIFMT_NB_NF:
        strcr &= ~SSI_STCR_TFSI;
        strcr |= SSI_STCR_TSCKP;
        break;
    }
    /* DAI clock master masks */
    switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
    case SND_SOC_DAIFMT_CBM_CFM:
        break;
    default:
        /* Master mode not implemented, needs handling of clocks. */
        return -EINVAL;
    }
    /* always enable transmit FIFO 0 */
    strcr |= SSI_STCR_TFEN0;
    if (ssi->flags & IMX_SSI_NET)
        scr |= SSI_SCR_NET;
    if (ssi->flags & IMX_SSI_SYN)
        scr |= SSI_SCR_SYN;
    writel(strcr, ssi->base + SSI_STCR);
    writel(strcr, ssi->base + SSI_SRCR);
    writel(scr, ssi->base + SSI_SCR);
    return 0;
}
/*
 * Switch the SSI system clock output on or off.
 * Should only be called when port is inactive (i.e. SSIEN = 0).
 */
static int imx_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
				  int clk_id, unsigned int freq, int dir)
{
	struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
	u32 val;

	/* The only supported clock id is the SSI system clock. */
	if (clk_id != IMX_SSP_SYS_CLK)
		return -EINVAL;

	val = readl(ssi->base + SSI_SCR);
	if (dir == SND_SOC_CLOCK_OUT)
		val |= SSI_SCR_SYS_CLK_EN;
	else
		val &= ~SSI_SCR_SYS_CLK_EN;

	writel(val, ssi->base + SSI_SCR);

	return 0;
}
/*
 * Program the SSI bit clock dividers (DIV2, PSR prescaler, PM divider)
 * for the transmit and receive directions.
 * Should only be called when port is inactive (i.e. SSIEN = 0).
 *
 * Fix: the IMX_SSI_RX_DIV_* cases previously modified the STCCR copy, so
 * receive divider settings were silently applied to the transmitter and
 * SRCCR was written back unchanged.  The RX cases now update srccr.
 */
static int imx_ssi_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
				  int div_id, int div)
{
	struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
	u32 stccr, srccr;

	stccr = readl(ssi->base + SSI_STCCR);
	srccr = readl(ssi->base + SSI_SRCCR);

	switch (div_id) {
	case IMX_SSI_TX_DIV_2:
		stccr &= ~SSI_STCCR_DIV2;
		stccr |= div;
		break;
	case IMX_SSI_TX_DIV_PSR:
		stccr &= ~SSI_STCCR_PSR;
		stccr |= div;
		break;
	case IMX_SSI_TX_DIV_PM:
		stccr &= ~0xff;
		stccr |= SSI_STCCR_PM(div);
		break;
	case IMX_SSI_RX_DIV_2:
		srccr &= ~SSI_STCCR_DIV2;
		srccr |= div;
		break;
	case IMX_SSI_RX_DIV_PSR:
		srccr &= ~SSI_STCCR_PSR;
		srccr |= div;
		break;
	case IMX_SSI_RX_DIV_PM:
		srccr &= ~0xff;
		srccr |= SSI_STCCR_PM(div);
		break;
	default:
		return -EINVAL;
	}

	writel(stccr, ssi->base + SSI_STCCR);
	writel(srccr, ssi->base + SSI_SRCCR);

	return 0;
}
/* Attach the direction-specific DMA parameters to the opening stream. */
static int imx_ssi_startup(struct snd_pcm_substream *substream,
			   struct snd_soc_dai *cpu_dai)
{
	struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
	struct imx_pcm_dma_params *dma_data =
		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
			&ssi->dma_params_tx : &ssi->dma_params_rx;

	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);

	return 0;
}
/*
 * Program the sample word length for the stream's direction.
 * Should only be called when port is inactive (i.e. SSIEN = 0),
 * although upper layers may call it multiple times.
 */
static int imx_ssi_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *cpu_dai)
{
	struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai);
	u32 reg = SSI_SRCCR;
	u32 val;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		reg = SSI_STCCR;
	/* In synchronous mode the transmit register governs both directions. */
	if (ssi->flags & IMX_SSI_SYN)
		reg = SSI_STCCR;

	val = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK;

	/* DAI data (word) size */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		val |= SSI_SRCCR_WL(16);
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		val |= SSI_SRCCR_WL(20);
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		val |= SSI_SRCCR_WL(24);
		break;
	}

	writel(val, ssi->base + reg);

	return 0;
}
/*
 * Start/stop the stream's direction and keep the shared SSI enable bit
 * reference-counted across the playback and capture streams.
 */
static int imx_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
		struct snd_soc_dai *dai)
{
	struct imx_ssi *ssi = snd_soc_dai_get_drvdata(dai);
	unsigned int sier_bits, sier;
	unsigned int scr;

	scr = readl(ssi->base + SSI_SCR);
	sier = readl(ssi->base + SSI_SIER);

	/* Pick the interrupt-enable bits for this direction: DMA request
	 * enables when DMA is in use, FIFO interrupts otherwise. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (ssi->flags & IMX_SSI_DMA)
			sier_bits = SSI_SIER_TDMAE;
		else
			sier_bits = SSI_SIER_TIE | SSI_SIER_TFE0_EN;
	} else {
		if (ssi->flags & IMX_SSI_DMA)
			sier_bits = SSI_SIER_RDMAE;
		else
			sier_bits = SSI_SIER_RIE | SSI_SIER_RFF0_EN;
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			scr |= SSI_SCR_TE;
		else
			scr |= SSI_SCR_RE;
		sier |= sier_bits;
		/* ->enabled counts running streams: power the SSI core up
		 * only when the first one starts. */
		if (++ssi->enabled == 1)
			scr |= SSI_SCR_SSIEN;
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			scr &= ~SSI_SCR_TE;
		else
			scr &= ~SSI_SCR_RE;
		sier &= ~sier_bits;
		/* ... and shut it down when the last one stops. */
		if (--ssi->enabled == 0)
			scr &= ~SSI_SCR_SSIEN;
		break;
	default:
		return -EINVAL;
	}

	if (!(ssi->flags & IMX_SSI_USE_AC97))
		/* rx/tx are always enabled to access ac97 registers */
		writel(scr, ssi->base + SSI_SCR);

	writel(sier, ssi->base + SSI_SIER);

	return 0;
}
/* DAI callbacks shared by the I2S and AC97 DAI descriptions below. */
static const struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = {
	.startup	= imx_ssi_startup,
	.hw_params	= imx_ssi_hw_params,
	.set_fmt	= imx_ssi_set_dai_fmt,
	.set_clkdiv	= imx_ssi_set_dai_clkdiv,
	.set_sysclk	= imx_ssi_set_dai_sysclk,
	.set_tdm_slot	= imx_ssi_set_dai_tdm_slot,
	.trigger	= imx_ssi_trigger,
};
static int imx_ssi_dai_probe(struct snd_soc_dai *dai)
{
struct imx_ssi *ssi = dev_get_drvdata(dai->dev);
uint32_t val;
snd_soc_dai_set_drvdata(dai, ssi);
val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) |
SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize);
writel(val, ssi->base + SSI_SFCSR);
return 0;
}
/* Generic I2S/TDM DAI: mono or stereo, 8 kHz - 96 kHz, S16_LE only. */
static struct snd_soc_dai_driver imx_ssi_dai = {
	.probe = imx_ssi_dai_probe,
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_96000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_96000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.ops = &imx_ssi_pcm_dai_ops,
};
/*
 * AC97 DAI: fixed at stereo 48 kHz S16_LE -- see the comment at the top
 * of this file for why only a 48 kHz sampling rate works with the FIQ
 * handler.
 */
static struct snd_soc_dai_driver imx_ac97_dai = {
	.probe = imx_ssi_dai_probe,
	.ac97_control = 1,
	.playback = {
		.stream_name = "AC97 Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.stream_name = "AC97 Capture",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.ops = &imx_ssi_pcm_dai_ops,
};
/*
 * Put the SSI into AC97 mode: synchronous network mode with 16-bit,
 * 13-slot frames (DC = 12, cf. imx_ssi_set_dai_tdm_slot()), FIFO
 * watermarks at 8, core plus TX/RX enabled, AC97 control on, and the
 * active data slots selected via SACCDIS/SACCEN.
 */
static void setup_channel_to_ac97(struct imx_ssi *imx_ssi)
{
	void __iomem *base = imx_ssi->base;

	/* Start from a clean slate. */
	writel(0x0, base + SSI_SCR);
	writel(0x0, base + SSI_STCR);
	writel(0x0, base + SSI_SRCR);

	writel(SSI_SCR_SYN | SSI_SCR_NET, base + SSI_SCR);

	writel(SSI_SFCSR_RFWM0(8) |
	       SSI_SFCSR_TFWM0(8) |
	       SSI_SFCSR_RFWM1(8) |
	       SSI_SFCSR_TFWM1(8), base + SSI_SFCSR);

	writel(SSI_STCCR_WL(16) | SSI_STCCR_DC(12), base + SSI_STCCR);
	writel(SSI_STCCR_WL(16) | SSI_STCCR_DC(12), base + SSI_SRCCR);

	/* Enable the core first, then transmitter and receiver. */
	writel(SSI_SCR_SYN | SSI_SCR_NET | SSI_SCR_SSIEN, base + SSI_SCR);
	writel(SSI_SOR_WAIT(3), base + SSI_SOR);

	writel(SSI_SCR_SYN | SSI_SCR_NET | SSI_SCR_SSIEN |
	       SSI_SCR_TE | SSI_SCR_RE,
	       base + SSI_SCR);

	writel(SSI_SACNT_DEFAULT, base + SSI_SACNT);
	/* Disable all AC97 slots, then enable the wanted ones (0x300). */
	writel(0xff, base + SSI_SACCDIS);
	writel(0x300, base + SSI_SACCEN);
}
/* The single SSI instance that owns the AC97 link (at most one allowed). */
static struct imx_ssi *ac97_ssi;
/* Write one AC97 codec register through the SSI AC97 command channel. */
static void imx_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
		unsigned short val)
{
	void __iomem *base = ac97_ssi->base;

	/* Only the 7-bit AC97 register address space is valid. */
	if (reg > 0x7f)
		return;

	pr_debug("%s: 0x%02x 0x%04x\n", __func__, reg, val);

	/* Register index goes into bits 19:12, data into bits 19:4. */
	writel((unsigned int)reg << 12, base + SSI_SACADD);
	writel((unsigned int)val << 4, base + SSI_SACDAT);
	writel(SSI_SACNT_DEFAULT | SSI_SACNT_WR, base + SSI_SACNT);

	/* No completion status is polled; wait a fixed delay instead
	 * (see the explanation at the top of this file). */
	udelay(100);
}
/* Read one AC97 codec register through the SSI AC97 command channel. */
static unsigned short imx_ssi_ac97_read(struct snd_ac97 *ac97,
		unsigned short reg)
{
	void __iomem *base = ac97_ssi->base;
	unsigned short val;

	/* Register index lives in bits 19:12 of SACADD. */
	writel(((unsigned int)reg & 0x7f) << 12, base + SSI_SACADD);
	writel(SSI_SACNT_DEFAULT | SSI_SACNT_RD, base + SSI_SACNT);

	/* The "updated" status bits are unreliable when the register
	 * value did not change, so just wait a fixed delay. */
	udelay(100);

	val = (readl(base + SSI_SACDAT) >> 4) & 0xffff;

	pr_debug("%s: 0x%02x 0x%04x\n", __func__, reg, val);

	return val;
}
/* Cold reset: delegate to the board-supplied callback, if any. */
static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)
{
	if (ac97_ssi->ac97_reset)
		ac97_ssi->ac97_reset(ac97);
}
/* Warm reset: delegate to the board-supplied callback, if any. */
static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
{
	if (ac97_ssi->ac97_warm_reset)
		ac97_ssi->ac97_warm_reset(ac97);
}
/*
 * AC97 bus operations exported for the generic ASoC AC97 support.
 * All operations act on the single instance stored in ac97_ssi.
 */
struct snd_ac97_bus_ops soc_ac97_ops = {
	.read		= imx_ssi_ac97_read,
	.write		= imx_ssi_ac97_write,
	.reset		= imx_ssi_ac97_reset,
	.warm_reset	= imx_ssi_ac97_warm_reset
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
/*
 * Probe one SSI instance: map its registers, register either the AC97 or
 * the I2S DAI, and create the FIQ and DMA PCM child platform devices.
 *
 * Fixes over the original:
 *  - 'res' was reused for the optional DMA resource lookups, so the error
 *    unwind released (or dereferenced a possibly NULL) DMA resource instead
 *    of the memory region; the MEM resource is now kept separately.
 *  - ac97_ssi was left pointing at the freed instance when a later probe
 *    step failed; it is now cleared on the unwind path.
 */
static int imx_ssi_probe(struct platform_device *pdev)
{
	struct resource *res, *dmares;
	struct imx_ssi *ssi;
	struct imx_ssi_platform_data *pdata = pdev->dev.platform_data;
	int ret = 0;
	struct snd_soc_dai_driver *dai;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi)
		return -ENOMEM;
	dev_set_drvdata(&pdev->dev, ssi);

	if (pdata) {
		ssi->ac97_reset = pdata->ac97_reset;
		ssi->ac97_warm_reset = pdata->ac97_warm_reset;
		ssi->flags = pdata->flags;
	}

	ssi->irq = platform_get_irq(pdev, 0);

	ssi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssi->clk)) {
		ret = PTR_ERR(ssi->clk);
		dev_err(&pdev->dev, "Cannot get the clock: %d\n",
			ret);
		goto failed_clk;
	}
	clk_enable(ssi->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto failed_get_resource;
	}

	if (!request_mem_region(res->start, resource_size(res), DRV_NAME)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		ret = -EBUSY;
		goto failed_get_resource;
	}

	ssi->base = ioremap(res->start, resource_size(res));
	if (!ssi->base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENODEV;
		goto failed_ioremap;
	}

	if (ssi->flags & IMX_SSI_USE_AC97) {
		/* Only one SSI instance may drive the AC97 link. */
		if (ac97_ssi) {
			ret = -EBUSY;
			goto failed_ac97;
		}
		ac97_ssi = ssi;
		setup_channel_to_ac97(ssi);
		dai = &imx_ac97_dai;
	} else
		dai = &imx_ssi_dai;

	writel(0x0, ssi->base + SSI_SIER);

	ssi->dma_params_rx.dma_addr = res->start + SSI_SRX0;
	ssi->dma_params_tx.dma_addr = res->start + SSI_STX0;

	ssi->dma_params_tx.burstsize = 6;
	ssi->dma_params_rx.burstsize = 4;

	/* DMA channels are optional.  Use a separate pointer so that the
	 * MEM resource in 'res' stays valid for the error unwind below. */
	dmares = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx0");
	if (dmares)
		ssi->dma_params_tx.dma = dmares->start;

	dmares = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx0");
	if (dmares)
		ssi->dma_params_rx.dma = dmares->start;

	platform_set_drvdata(pdev, ssi);

	ret = snd_soc_register_dai(&pdev->dev, dai);
	if (ret) {
		dev_err(&pdev->dev, "register DAI failed\n");
		goto failed_register;
	}

	ssi->soc_platform_pdev_fiq = platform_device_alloc("imx-fiq-pcm-audio", pdev->id);
	if (!ssi->soc_platform_pdev_fiq) {
		ret = -ENOMEM;
		goto failed_pdev_fiq_alloc;
	}

	platform_set_drvdata(ssi->soc_platform_pdev_fiq, ssi);
	ret = platform_device_add(ssi->soc_platform_pdev_fiq);
	if (ret) {
		dev_err(&pdev->dev, "failed to add platform device\n");
		goto failed_pdev_fiq_add;
	}

	ssi->soc_platform_pdev = platform_device_alloc("imx-pcm-audio", pdev->id);
	if (!ssi->soc_platform_pdev) {
		ret = -ENOMEM;
		goto failed_pdev_alloc;
	}

	platform_set_drvdata(ssi->soc_platform_pdev, ssi);
	ret = platform_device_add(ssi->soc_platform_pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to add platform device\n");
		goto failed_pdev_add;
	}

	return 0;

failed_pdev_add:
	platform_device_put(ssi->soc_platform_pdev);
failed_pdev_alloc:
	platform_device_del(ssi->soc_platform_pdev_fiq);
failed_pdev_fiq_add:
	platform_device_put(ssi->soc_platform_pdev_fiq);
failed_pdev_fiq_alloc:
	snd_soc_unregister_dai(&pdev->dev);
failed_register:
failed_ac97:
	/* Release AC97 link ownership if this instance had claimed it
	 * (on the -EBUSY path ac97_ssi belongs to another instance). */
	if (ac97_ssi == ssi)
		ac97_ssi = NULL;
	iounmap(ssi->base);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_get_resource:
	clk_disable(ssi->clk);
	clk_put(ssi->clk);
failed_clk:
	kfree(ssi);

	return ret;
}
/* Tear down everything imx_ssi_probe() set up, in reverse order. */
static int __devexit imx_ssi_remove(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct imx_ssi *ssi = platform_get_drvdata(pdev);

	platform_device_unregister(ssi->soc_platform_pdev);
	platform_device_unregister(ssi->soc_platform_pdev_fiq);

	snd_soc_unregister_dai(&pdev->dev);

	if (ssi->flags & IMX_SSI_USE_AC97)
		ac97_ssi = NULL;	/* release AC97 link ownership */

	iounmap(ssi->base);
	release_mem_region(res->start, resource_size(res));
	clk_disable(ssi->clk);
	clk_put(ssi->clk);
	kfree(ssi);

	return 0;
}
/* Platform driver glue; instances are matched by the "imx-ssi" name. */
static struct platform_driver imx_ssi_driver = {
	.probe = imx_ssi_probe,
	.remove = __devexit_p(imx_ssi_remove),

	.driver = {
		.name = "imx-ssi",
		.owner = THIS_MODULE,
	},
};

module_platform_driver(imx_ssi_driver);

/* Module information */
MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-ssi");
| gpl-2.0 |
nizovn/linux | arch/sh/boards/board-magicpanelr2.c | 4727 | 11237 | /*
* linux/arch/sh/boards/magicpanel/setup.c
*
* Copyright (C) 2007 Markus Brunner, Mark Jonas
*
* Magic Panel Release 2 board setup
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/map.h>
#include <linux/sh_intc.h>
#include <mach/magicpanelr2.h>
#include <asm/heartbeat.h>
#include <cpu/sh7720.h>
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
	REGULATOR_SUPPLY("vddvario", "smsc911x"),
	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
};

/* LAN9115 "ready" flag: bit 0 of the register at 0xA8000084.
 * NOTE(review): exact register semantics assumed from usage here. */
#define LAN9115_READY		(__raw_readl(0xA8000084UL) & 0x00000001UL)
/*
 * Wait until the Ethernet controller has finished its reset.
 * Returns 1 when the ready bit comes up, 0 after a 100ms timeout.
 */
static int __init ethernet_reset_finished(void)
{
	int tries;

	/* One immediate check, then up to ten polls 10ms apart. */
	for (tries = 0; ; tries++) {
		if (LAN9115_READY)
			return 1;
		if (tries >= 10)
			return 0;
		mdelay(10);
	}
}
/* Pulse the LAN controller's reset line (active low, port M bit 4). */
static void __init reset_ethernet(void)
{
	/* PMDR: LAN_RESET=on */
	CLRBITS_OUTB(0x10, PORT_PMDR);

	udelay(200);

	/* PMDR: LAN_RESET=off */
	SETBITS_OUTB(0x10, PORT_PMDR);
}
/*
 * Program the external bus controller chip selects: LAN on CS2, the three
 * CAN controllers on CS4/CS5a/CS5b, the rotary switch on CS6a.
 */
static void __init setup_chip_select(void)
{
	/* CS2: LAN (0x08000000 - 0x0bffffff) */
	/* no idle cycles, normal space, 8 bit data bus */
	__raw_writel(0x36db0400, CS2BCR);
	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
	__raw_writel(0x000003c0, CS2WCR);

	/* CS4: CAN1 (0xb0000000 - 0xb3ffffff) */
	/* no idle cycles, normal space, 8 bit data bus */
	__raw_writel(0x00000200, CS4BCR);
	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
	__raw_writel(0x00100981, CS4WCR);

	/* CS5a: CAN2 (0xb4000000 - 0xb5ffffff) */
	/* no idle cycles, normal space, 8 bit data bus */
	__raw_writel(0x00000200, CS5ABCR);
	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
	__raw_writel(0x00100981, CS5AWCR);

	/* CS5b: CAN3 (0xb6000000 - 0xb7ffffff) */
	/* no idle cycles, normal space, 8 bit data bus */
	__raw_writel(0x00000200, CS5BBCR);
	/* (SW:1.5 WR:3 HW:1.5), ext. wait */
	__raw_writel(0x00100981, CS5BWCR);

	/* CS6a: Rotary (0xb8000000 - 0xb9ffffff) */
	/* no idle cycles, normal space, 8 bit data bus */
	__raw_writel(0x00000200, CS6ABCR);
	/* (SW:1.5 WR:3 HW:1.5), no ext. wait */
	__raw_writel(0x001009C1, CS6AWCR);
}
/*
 * Route every SH7720 I/O port pin to its board function (GPIO, LCD, ADC,
 * IRQ, JTAG, serial, ...).  The per-port comments list pin 7 down to 0.
 */
static void __init setup_port_multiplexing(void)
{
	/* A7 GPO(LED8);     A6 GPO(LED7);     A5 GPO(LED6);      A4 GPO(LED5);
	 * A3 GPO(LED4);     A2 GPO(LED3);     A1 GPO(LED2);      A0 GPO(LED1);
	 */
	__raw_writew(0x5555, PORT_PACR);	/* 01 01 01 01 01 01 01 01 */

	/* B7 GPO(RST4);   B6 GPO(RST3);  B5 GPO(RST2);    B4 GPO(RST1);
	 * B3 GPO(PB3);    B2 GPO(PB2);   B1 GPO(PB1);     B0 GPO(PB0);
	 */
	__raw_writew(0x5555, PORT_PBCR);	/* 01 01 01 01 01 01 01 01 */

	/* C7 GPO(PC7);      C6 GPO(PC6);      C5 GPO(PC5);       C4 GPO(PC4);
	 * C3 LCD_DATA3;     C2 LCD_DATA2;     C1 LCD_DATA1;      C0 LCD_DATA0;
	 */
	__raw_writew(0x5500, PORT_PCCR);	/* 01 01 01 01 00 00 00 00 */

	/* D7 GPO(PD7);    D6 GPO(PD6);      D5 GPO(PD5);       D4 GPO(PD4);
	 * D3 GPO(PD3);    D2 GPO(PD2);      D1 GPO(PD1);       D0 GPO(PD0);
	 */
	__raw_writew(0x5555, PORT_PDCR);	/* 01 01 01 01 01 01 01 01 */

	/* E7 (x);        E6 GPI(nu);     E5 GPI(nu);     E4 LCD_M_DISP;
	 * E3 LCD_CL1;    E2 LCD_CL2;     E1 LCD_DON;     E0 LCD_FLM;
	 */
	__raw_writew(0x3C00, PORT_PECR);	/* 00 11 11 00 00 00 00 00 */

	/* F7 (x);           F6 DA1(VLCD);     F5 DA0(nc);        F4 AN3;
	 * F3 AN2(MID_AD);   F2 AN1(EARTH_AD); F1 AN0(TEMP);      F0 GPI+(nc);
	 */
	__raw_writew(0x0002, PORT_PFCR);	/* 00 00 00 00 00 00 00 10 */

	/* G7 (x);              G6 IRQ5(TOUCH_BUSY); G5 IRQ4(TOUCH_IRQ); G4 GPI(KEY2);
	 * G3 GPI(KEY1);        G2 GPO(LED11);       G1 GPO(LED10);      G0 GPO(LED9);
	 */
	__raw_writew(0x03D5, PORT_PGCR);	/* 00 00 00 11 11 01 01 01 */

	/* H7 (x);              H6 /RAS(BRAS);    H5 /CAS(BCAS); H4 CKE(BCKE);
	 * H3 GPO(EARTH_OFF);   H2 GPO(EARTH_TEST); H1 USB2_PWR; H0 USB1_PWR;
	 */
	__raw_writew(0x0050, PORT_PHCR);	/* 00 00 00 00 01 01 00 00 */

	/* J7 (x);           J6 AUDCK;         J5 ASEBRKAK;       J4 AUDATA3;
	 * J3 AUDATA2;       J2 AUDATA1;       J1 AUDATA0;        J0 AUDSYNC;
	 */
	__raw_writew(0x0000, PORT_PJCR);	/* 00 00 00 00 00 00 00 00 */

	/* K7 (x);           K6 (x);           K5 (x);            K4 (x);
	 * K3 PINT7(/PWR2);  K2 PINT6(/PWR1);  K1 PINT5(nu);      K0 PINT4(FLASH_READY)
	 */
	__raw_writew(0x00FF, PORT_PKCR);	/* 00 00 00 00 11 11 11 11 */

	/* L7 TRST;          L6 TMS;           L5 TDO;            L4 TDI;
	 * L3 TCK;           L2 (x);           L1 (x);            L0 (x);
	 */
	__raw_writew(0x0000, PORT_PLCR);	/* 00 00 00 00 00 00 00 00 */

	/* M7 GPO(CURRENT_SINK);    M6 GPO(PWR_SWITCH);     M5 GPO(LAN_SPEED);
	 * M4 GPO(LAN_RESET);       M3 GPO(BUZZER);         M2 GPO(LCD_BL);
	 * M1 CS5B(CAN3_CS);        M0 GPI+(nc);
	 */
	__raw_writew(0x5552, PORT_PMCR);	/* 01 01 01 01 01 01 00 10 */

	/* CURRENT_SINK=off,	PWR_SWITCH=off, LAN_SPEED=100MBit,
	 * LAN_RESET=off,	BUZZER=off,	LCD_BL=off
	 */
#if CONFIG_SH_MAGIC_PANEL_R2_VERSION == 2
	__raw_writeb(0x30, PORT_PMDR);
#elif CONFIG_SH_MAGIC_PANEL_R2_VERSION == 3
	__raw_writeb(0xF0, PORT_PMDR);
#else
#error Unknown revision of PLATFORM_MP_R2
#endif

	/* P7 (x);           P6 (x);           P5 (x);
	 * P4 GPO(nu);       P3 IRQ3(LAN_IRQ); P2 IRQ2(CAN3_IRQ);
	 * P1 IRQ1(CAN2_IRQ); P0 IRQ0(CAN1_IRQ)
	 */
	__raw_writew(0x0100, PORT_PPCR);	/* 00 00 00 01 00 00 00 00 */
	__raw_writeb(0x10, PORT_PPDR);

	/* R7 A25;           R6 A24;           R5 A23;            R4 A22;
	 * R3 A21;           R2 A20;           R1 A19;            R0 A0;
	 */
	gpio_request(GPIO_FN_A25, NULL);
	gpio_request(GPIO_FN_A24, NULL);
	gpio_request(GPIO_FN_A23, NULL);
	gpio_request(GPIO_FN_A22, NULL);
	gpio_request(GPIO_FN_A21, NULL);
	gpio_request(GPIO_FN_A20, NULL);
	gpio_request(GPIO_FN_A19, NULL);
	gpio_request(GPIO_FN_A0, NULL);

	/* S7 (x);              S6 (x);        S5 (x);         S4 GPO(EEPROM_CS2);
	 * S3 GPO(EEPROM_CS1);  S2 SIOF0_TXD;  S1 SIOF0_RXD;   S0 SIOF0_SCK;
	 */
	__raw_writew(0x0140, PORT_PSCR);	/* 00 00 00 01 01 00 00 00 */

	/* T7 (x);           T6 (x);           T5 (x);            T4 COM1_CTS;
	 * T3 COM1_RTS;      T2 COM1_TXD;      T1 COM1_RXD;       T0 GPO(WDOG)
	 */
	__raw_writew(0x0001, PORT_PTCR);	/* 00 00 00 00 00 00 00 01 */

	/* U7 (x);           U6 (x);           U5 (x);            U4 GPI+(/AC_FAULT);
	 * U3 GPO(TOUCH_CS); U2 TOUCH_TXD;     U1 TOUCH_RXD;      U0 TOUCH_SCK;
	 */
	__raw_writew(0x0240, PORT_PUCR);	/* 00 00 00 10 01 00 00 00 */

	/* V7 (x);           V6 (x);           V5 (x);            V4 GPO(MID2);
	 * V3 GPO(MID1);     V2 CARD_TxD;      V1 CARD_RxD;       V0 GPI+(/BAT_FAULT);
	 */
	__raw_writew(0x0142, PORT_PVCR);	/* 00 00 00 01 01 00 00 10 */
}
/*
 * Board setup entry point: pin-select registers, USB clocking, chip
 * selects, port multiplexing and the Ethernet controller reset.
 */
static void __init mpr2_setup(char **cmdline_p)
{
	/* set Pin Select Register A:
	 * /PCC_CD1, /PCC_CD2, PCC_BVD1, PCC_BVD2,
	 * /IOIS16, IRQ4, IRQ5, USB1d_SUSPEND
	 */
	__raw_writew(0xAABC, PORT_PSELA);
	/* set Pin Select Register B:
	 * /SCIF0_RTS, /SCIF0_CTS, LCD_VCPWC,
	 * LCD_VEPWC, IIC_SDA, IIC_SCL, Reserved
	 */
	__raw_writew(0x3C00, PORT_PSELB);
	/* set Pin Select Register C:
	 * SIOF1_SCK, SIOF1_RxD, SCIF1_RxD, SCIF1_TxD, Reserved
	 */
	__raw_writew(0x0000, PORT_PSELC);
	/* set Pin Select Register D: Reserved, SIOF1_TxD, Reserved, SIOF1_MCLK,
	 * Reserved, SIOF1_SYNC, Reserved, SCIF1_SCK, Reserved
	 */
	__raw_writew(0x0000, PORT_PSELD);
	/* set USB TxRx Control: Reserved, DRV, Reserved, USB_TRANS, USB_SEL */
	__raw_writew(0x0101, PORT_UTRCTL);
	/* set USB Clock Control: USSCS, USSTB, Reserved (HighByte always A5) */
	__raw_writew(0xA5C0, PORT_UCLKCR_W);

	setup_chip_select();

	setup_port_multiplexing();

	reset_ethernet();

	printk(KERN_INFO "Magic Panel Release 2 A.%i\n",
				CONFIG_SH_MAGIC_PANEL_R2_VERSION);

	if (ethernet_reset_finished() == 0)
		printk(KERN_WARNING "Ethernet not ready\n");
}
/* SMSC LAN9115 Ethernet: memory window on CS2, wired to external IRQ3. */
static struct resource smsc911x_resources[] = {
	[0] = {
		.start		= 0xa8000000,
		.end		= 0xabffffff,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= evt2irq(0x660),
		.end		= evt2irq(0x660),
		.flags		= IORESOURCE_IRQ,
	},
};

/* Active-low, open-drain interrupt line; 32-bit register access. */
static struct smsc911x_platform_config smsc911x_config = {
	.phy_interface	= PHY_INTERFACE_MODE_MII,
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
	.flags		= SMSC911X_USE_32BIT,
};

static struct platform_device smsc911x_device = {
	.name		= "smsc911x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(smsc911x_resources),
	.resource	= smsc911x_resources,
	.dev = {
		.platform_data = &smsc911x_config,
	},
};
/* Heartbeat LED on the PA_LED port; the LEDs are driven inverted. */
static struct resource heartbeat_resources[] = {
	[0] = {
		.start	= PA_LED,
		.end	= PA_LED,
		.flags	= IORESOURCE_MEM,
	},
};

static struct heartbeat_data heartbeat_data = {
	.flags		= HEARTBEAT_INVERTED,
};

static struct platform_device heartbeat_device = {
	.name		= "heartbeat",
	.id		= -1,
	.dev	= {
		.platform_data	= &heartbeat_data,
	},
	.num_resources	= ARRAY_SIZE(heartbeat_resources),
	.resource	= heartbeat_resources,
};
/* NOR flash layout: read-only bootloader, kernel, and a filesystem
 * partition taking up the rest of the device. */
static struct mtd_partition mpr2_partitions[] = {
	/* Reserved for bootloader, read-only */
	{
		.name		= "Bootloader",
		.offset		= 0x00000000UL,
		.size		= MPR2_MTD_BOOTLOADER_SIZE,
		.mask_flags	= MTD_WRITEABLE,
	},
	/* Reserved for kernel image */
	{
		.name		= "Kernel",
		.offset		= MTDPART_OFS_NXTBLK,
		.size		= MPR2_MTD_KERNEL_SIZE,
	},
	/* Rest is used for Flash FS */
	{
		.name		= "Flash_FS",
		.offset		= MTDPART_OFS_NXTBLK,
		.size		= MTDPART_SIZ_FULL,
	}
};

static struct physmap_flash_data flash_data = {
	.parts		= mpr2_partitions,
	.nr_parts	= ARRAY_SIZE(mpr2_partitions),
	.width		= 2,
};

/* NOTE(review): resource .end is inclusive; 0x2000000 looks like a size
 * (32 MiB) rather than the last valid address -- confirm against the
 * board's flash mapping. */
static struct resource flash_resource = {
	.start		= 0x00000000,
	.end		= 0x2000000UL,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device flash_device = {
	.name		= "physmap-flash",
	.id		= -1,
	.resource	= &flash_resource,
	.num_resources	= 1,
	.dev		= {
		.platform_data	= &flash_data,
	},
};
/*
 * Add all resources to the platform_device
 */
static struct platform_device *mpr2_devices[] __initdata = {
	&heartbeat_device,
	&smsc911x_device,
	&flash_device,
};

/* Register the dummy Ethernet regulators and all on-board devices. */
static int __init mpr2_devices_setup(void)
{
	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));

	return platform_add_devices(mpr2_devices, ARRAY_SIZE(mpr2_devices));
}
device_initcall(mpr2_devices_setup);
/*
 * Initialize IRQ setting
 */
static void __init init_mpr2_IRQ(void)
{
	plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */

	irq_set_irq_type(evt2irq(0x600), IRQ_TYPE_LEVEL_LOW);    /* IRQ0 CAN1 */
	irq_set_irq_type(evt2irq(0x620), IRQ_TYPE_LEVEL_LOW);    /* IRQ1 CAN2 */
	irq_set_irq_type(evt2irq(0x640), IRQ_TYPE_LEVEL_LOW);    /* IRQ2 CAN3 */
	irq_set_irq_type(evt2irq(0x660), IRQ_TYPE_LEVEL_LOW);    /* IRQ3 SMSC9115 */
	irq_set_irq_type(evt2irq(0x680), IRQ_TYPE_EDGE_RISING);  /* IRQ4 touchscreen */
	irq_set_irq_type(evt2irq(0x6a0), IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */

	/* CAN interrupts get a high priority, the LAN a lower one. */
	intc_set_priority(evt2irq(0x600), 13);		/* IRQ0 CAN1 */
	intc_set_priority(evt2irq(0x620), 13);		/* IRQ1 CAN2 */
	intc_set_priority(evt2irq(0x640), 13);		/* IRQ2 CAN3 */
	intc_set_priority(evt2irq(0x660), 6);		/* IRQ3 SMSC9115 */
}
/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_mpr2 __initmv = {
	.mv_name		= "mpr2",
	.mv_setup		= mpr2_setup,
	.mv_init_irq		= init_mpr2_IRQ,
};
| gpl-2.0 |
dengbiao/tc_kernel_linux | drivers/serial/sc26xx.c | 4727 | 16855 | /*
 * SC268xx.c: Serial driver for Philips SC2681/SC2692 devices.
*
* Copyright (C) 2006,2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
#include <linux/circ_buf.h>
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#if defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
#define SC26XX_MAJOR 204
#define SC26XX_MINOR_START 205
#define SC26XX_NR 2
struct uart_sc26xx_port {
struct uart_port port[2];
u8 dsr_mask[2];
u8 cts_mask[2];
u8 dcd_mask[2];
u8 ri_mask[2];
u8 dtr_mask[2];
u8 rts_mask[2];
u8 imr;
};
/* register common to both ports */
#define RD_ISR 0x14
#define RD_IPR 0x34
#define WR_ACR 0x10
#define WR_IMR 0x14
#define WR_OPCR 0x34
#define WR_OPR_SET 0x38
#define WR_OPR_CLR 0x3C
/* access common register */
#define READ_SC(p, r) readb((p)->membase + RD_##r)
#define WRITE_SC(p, r, v) writeb((v), (p)->membase + WR_##r)
/* register per port */
#define RD_PORT_MRx 0x00
#define RD_PORT_SR 0x04
#define RD_PORT_RHR 0x0c
#define WR_PORT_MRx 0x00
#define WR_PORT_CSR 0x04
#define WR_PORT_CR 0x08
#define WR_PORT_THR 0x0c
/* SR bits */
#define SR_BREAK (1 << 7)
#define SR_FRAME (1 << 6)
#define SR_PARITY (1 << 5)
#define SR_OVERRUN (1 << 4)
#define SR_TXRDY (1 << 2)
#define SR_RXRDY (1 << 0)
#define CR_RES_MR (1 << 4)
#define CR_RES_RX (2 << 4)
#define CR_RES_TX (3 << 4)
#define CR_STRT_BRK (6 << 4)
#define CR_STOP_BRK (7 << 4)
#define CR_DIS_TX (1 << 3)
#define CR_ENA_TX (1 << 2)
#define CR_DIS_RX (1 << 1)
#define CR_ENA_RX (1 << 0)
/* ISR bits */
#define ISR_RXRDYB (1 << 5)
#define ISR_TXRDYB (1 << 4)
#define ISR_RXRDYA (1 << 1)
#define ISR_TXRDYA (1 << 0)
/* IMR bits */
#define IMR_RXRDY (1 << 1)
#define IMR_TXRDY (1 << 0)
/* access port register */

/* Per-line register banks are spaced 0x20 apart; port->line selects
 * channel A (0) or B (1). */
static inline u8 read_sc_port(struct uart_port *p, u8 reg)
{
	return readb(p->membase + p->line * 0x20 + reg);
}

static inline void write_sc_port(struct uart_port *p, u8 reg, u8 val)
{
	writeb(val, p->membase + p->line * 0x20 + reg);
}

#define READ_SC_PORT(p, r)	read_sc_port(p, RD_PORT_##r)
#define WRITE_SC_PORT(p, r, v)	write_sc_port(p, WR_PORT_##r, v)
/* Set this line's TX/RX bits in the chip's shared interrupt mask. */
static void sc26xx_enable_irq(struct uart_port *port, int mask)
{
	int line = port->line;
	struct uart_sc26xx_port *up;

	/* Step back to port A to recover the container holding both lines. */
	port -= line;
	up = container_of(port, struct uart_sc26xx_port, port[0]);

	/* Line B's mask bits live four positions above line A's. */
	up->imr |= mask << (line * 4);
	WRITE_SC(port, IMR, up->imr);
}
/* Clear this line's TX/RX bits in the chip's shared interrupt mask. */
static void sc26xx_disable_irq(struct uart_port *port, int mask)
{
	int line = port->line;
	struct uart_sc26xx_port *up;

	/* Step back to port A to recover the container holding both lines. */
	port -= line;
	up = container_of(port, struct uart_sc26xx_port, port[0]);

	/* Line B's mask bits live four positions above line A's. */
	up->imr &= ~(mask << (line * 4));
	WRITE_SC(port, IMR, up->imr);
}
/*
 * Drain the receiver into the tty flip buffer, classifying break,
 * parity, framing and overrun conditions.  Returns the tty that needs a
 * flip-buffer push, or NULL for an unopened serial console.
 */
static struct tty_struct *receive_chars(struct uart_port *port)
{
	struct tty_struct *tty = NULL;
	int limit = 10000;	/* bound the work done per interrupt */
	unsigned char ch;
	char flag;
	u8 status;

	if (port->state != NULL)		/* Unopened serial console */
		tty = port->state->port.tty;

	while (limit-- > 0) {
		status = READ_SC_PORT(port, SR);
		if (!(status & SR_RXRDY))
			break;
		ch = READ_SC_PORT(port, RHR);

		flag = TTY_NORMAL;
		port->icount.rx++;

		if (unlikely(status & (SR_BREAK | SR_FRAME |
				       SR_PARITY | SR_OVERRUN))) {
			if (status & SR_BREAK) {
				/* A break also raises parity/framing bits;
				 * don't double-count those. */
				status &= ~(SR_PARITY | SR_FRAME);
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			} else if (status & SR_PARITY)
				port->icount.parity++;
			else if (status & SR_FRAME)
				port->icount.frame++;
			if (status & SR_OVERRUN)
				port->icount.overrun++;

			/* Reduce to the conditions the tty layer wants. */
			status &= port->read_status_mask;
			if (status & SR_BREAK)
				flag = TTY_BREAK;
			else if (status & SR_PARITY)
				flag = TTY_PARITY;
			else if (status & SR_FRAME)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(port, ch))
			continue;

		if (status & port->ignore_status_mask)
			continue;

		tty_insert_flip_char(tty, ch, flag);
	}
	return tty;
}
/*
 * Feed the software transmit ring into the TX holding register while
 * the TXRDY status bit says the transmitter accepts data.
 */
static void transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit;

	if (!port->state)
		return;

	xmit = &port->state->xmit;
	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		/* Nothing (more) to send: stop TXRDY interrupts. */
		sc26xx_disable_irq(port, IMR_TXRDY);
		return;
	}

	while (!uart_circ_empty(xmit)) {
		if (!(READ_SC_PORT(port, SR) & SR_TXRDY))
			break;

		WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}
/*
 * Shared interrupt handler for both UART lines.  One ISR read reports
 * TX/RX readiness for channels A and B; each channel is serviced under
 * its own port lock and its flip buffer pushed outside that lock.
 */
static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
{
	struct uart_sc26xx_port *up = dev_id;
	struct tty_struct *tty;
	unsigned long flags;
	u8 isr;

	spin_lock_irqsave(&up->port[0].lock, flags);

	tty = NULL;
	isr = READ_SC(&up->port[0], ISR);
	if (isr & ISR_TXRDYA)
		transmit_chars(&up->port[0]);
	if (isr & ISR_RXRDYA)
		tty = receive_chars(&up->port[0]);

	spin_unlock(&up->port[0].lock);

	if (tty)
		tty_flip_buffer_push(tty);

	spin_lock(&up->port[1].lock);

	tty = NULL;
	if (isr & ISR_TXRDYB)
		transmit_chars(&up->port[1]);
	if (isr & ISR_RXRDYB)
		tty = receive_chars(&up->port[1]);

	spin_unlock_irqrestore(&up->port[1].lock, flags);

	if (tty)
		tty_flip_buffer_push(tty);

	return IRQ_HANDLED;
}
/* port->lock is not held. */
static unsigned int sc26xx_tx_empty(struct uart_port *port)
{
	if (READ_SC_PORT(port, SR) & SR_TXRDY)
		return TIOCSER_TEMT;

	return 0;
}
/* port->lock held by caller. */
static void sc26xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_sc26xx_port *up;
int line = port->line;
port -= line;
up = container_of(port, struct uart_sc26xx_port, port[0]);
if (up->dtr_mask[line]) {
if (mctrl & TIOCM_DTR)
WRITE_SC(port, OPR_SET, up->dtr_mask[line]);
else
WRITE_SC(port, OPR_CLR, up->dtr_mask[line]);
}
if (up->rts_mask[line]) {
if (mctrl & TIOCM_RTS)
WRITE_SC(port, OPR_SET, up->rts_mask[line]);
else
WRITE_SC(port, OPR_CLR, up->rts_mask[line]);
}
}
/* port->lock is held by caller and interrupts are disabled. */
static unsigned int sc26xx_get_mctrl(struct uart_port *port)
{
	int line = port->line;
	struct uart_sc26xx_port *up;
	unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
	u8 ipr;

	/* Step back to port A to reach the shared chip state. */
	port -= line;
	up = container_of(port, struct uart_sc26xx_port, port[0]);

	/* Input port pins are active low; invert once up front. */
	ipr = READ_SC(port, IPR) ^ 0xff;

	/* Unwired signals (zero mask) stay asserted; wired ones reflect
	 * the corresponding input pin. */
	if (up->dsr_mask[line] && !(ipr & up->dsr_mask[line]))
		mctrl &= ~TIOCM_DSR;
	if (up->cts_mask[line] && !(ipr & up->cts_mask[line]))
		mctrl &= ~TIOCM_CTS;
	if (up->dcd_mask[line] && !(ipr & up->dcd_mask[line]))
		mctrl &= ~TIOCM_CAR;
	if (up->ri_mask[line] && (ipr & up->ri_mask[line]))
		mctrl |= TIOCM_RNG;

	return mctrl;
}
/* port->lock held by caller. */
static void sc26xx_stop_tx(struct uart_port *port)
{
	/* Nothing to do here: transmit_chars() masks the TXRDY interrupt
	 * itself once the ring is empty or TX is stopped. */
}
/* port->lock held by caller. */
static void sc26xx_start_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	/* Stuff the transmitter until it fills up, then let the TXRDY
	 * interrupt drain the remainder of the ring. */
	while (!uart_circ_empty(xmit)) {
		if (!(READ_SC_PORT(port, SR) & SR_TXRDY)) {
			sc26xx_enable_irq(port, IMR_TXRDY);
			return;
		}
		WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}
}
/* port->lock held by caller. */
static void sc26xx_stop_rx(struct uart_port *port)
{
	/* Intentionally empty; the receiver is disabled in sc26xx_shutdown(). */
}

/* port->lock held by caller. */
static void sc26xx_enable_ms(struct uart_port *port)
{
	/* No-op: modem-status change interrupts are not implemented. */
}
/* port->lock is not held. */
static void sc26xx_break_ctl(struct uart_port *port, int break_state)
{
if (break_state == -1)
WRITE_SC_PORT(port, CR, CR_STRT_BRK);
else
WRITE_SC_PORT(port, CR, CR_STOP_BRK);
}
/* port->lock is not held. */
static int sc26xx_startup(struct uart_port *port)
{
	/* Quiesce interrupts before touching the channel. */
	sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
	WRITE_SC(port, OPCR, 0);

	/* reset tx and rx */
	WRITE_SC_PORT(port, CR, CR_RES_RX);
	WRITE_SC_PORT(port, CR, CR_RES_TX);

	/* start rx/tx */
	WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);

	/* enable irqs (TXRDY is turned on lazily by sc26xx_start_tx()) */
	sc26xx_enable_irq(port, IMR_RXRDY);

	return 0;
}
/* port->lock is not held. */
static void sc26xx_shutdown(struct uart_port *port)
{
	/* disable interrupts */
	sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);

	/* stop tx/rx */
	WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
}
/* port->lock is not held. */
/*
 * Reprogram the channel for the line settings in @termios: word size,
 * stop bits, parity and baud rate, plus the serial core's status masks.
 * The sequence is order-sensitive (disable channel, reset the MR pointer,
 * write MR1 then MR2, select the clock, then re-enable), matching the
 * SC2681/SC2692 register model.
 */
static void sc26xx_set_termios(struct uart_port *port, struct ktermios *termios,
			       struct ktermios *old)
{
	unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
	unsigned int quot = uart_get_divisor(port, baud);
	unsigned int iflag, cflag;
	unsigned long flags;
	u8 mr1, mr2, csr;

	spin_lock_irqsave(&port->lock, flags);

	/* Wait until SR bits 3 and 2 are both set before touching the
	 * channel — presumably transmitter-empty/ready; the bit names are
	 * not defined in this chunk (TODO: confirm against the datasheet). */
	while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
		udelay(2);

	WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);

	iflag = termios->c_iflag;
	cflag = termios->c_cflag;

	/* Tell the serial core which status bits we report / ignore. */
	port->read_status_mask = SR_OVERRUN;
	if (iflag & INPCK)
		port->read_status_mask |= SR_PARITY | SR_FRAME;
	if (iflag & (BRKINT | PARMRK))
		port->read_status_mask |= SR_BREAK;

	port->ignore_status_mask = 0;
	if (iflag & IGNBRK)
		port->ignore_status_mask |= SR_BREAK;
	if ((cflag & CREAD) == 0)
		port->ignore_status_mask |= SR_BREAK | SR_FRAME |
					    SR_PARITY | SR_OVERRUN;

	/* MR1 bits 1:0 select the character length (5..8 data bits). */
	switch (cflag & CSIZE) {
	case CS5:
		mr1 = 0x00;
		break;
	case CS6:
		mr1 = 0x01;
		break;
	case CS7:
		mr1 = 0x02;
		break;
	default:
	case CS8:
		mr1 = 0x03;
		break;
	}

	/* MR2: 1 stop bit by default, 2 stop bits when CSTOPB is set. */
	mr2 = 0x07;
	if (cflag & CSTOPB)
		mr2 = 0x0f;

	/* Parity: default even, bit 2 flips to odd, field value 2<<3
	 * disables parity entirely. */
	if (cflag & PARENB) {
		if (cflag & PARODD)
			mr1 |= (1 << 2);
	} else
		mr1 |= (2 << 3);

	/* CSR holds a paired rx/tx clock-select code per rate.  Rates not
	 * listed here (e.g. 1800, 38400) fall through to the 9600 default. */
	switch (baud) {
	case 50:
		csr = 0x00;
		break;
	case 110:
		csr = 0x11;
		break;
	case 134:
		csr = 0x22;
		break;
	case 200:
		csr = 0x33;
		break;
	case 300:
		csr = 0x44;
		break;
	case 600:
		csr = 0x55;
		break;
	case 1200:
		csr = 0x66;
		break;
	case 2400:
		csr = 0x88;
		break;
	case 4800:
		csr = 0x99;
		break;
	default:
	case 9600:
		csr = 0xbb;
		break;
	case 19200:
		csr = 0xcc;
		break;
	}

	/* Reset the MR pointer, then write MR1 followed by MR2 through
	 * the same auto-incrementing register. */
	WRITE_SC_PORT(port, CR, CR_RES_MR);
	WRITE_SC_PORT(port, MRx, mr1);
	WRITE_SC_PORT(port, MRx, mr2);

	WRITE_SC(port, ACR, 0x80);
	WRITE_SC_PORT(port, CSR, csr);

	/* reset tx and rx */
	WRITE_SC_PORT(port, CR, CR_RES_RX);
	WRITE_SC_PORT(port, CR, CR_RES_TX);

	WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
	while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
		udelay(2);

	/* XXX */
	uart_update_timeout(port, cflag,
			    (port->uartclk / (16 * quot)));

	spin_unlock_irqrestore(&port->lock, flags);
}
/* Report the port type string shown by the serial core. */
static const char *sc26xx_type(struct uart_port *port)
{
	return "SC26XX";
}
/* The probe routine claims and maps the register window itself, so the
 * per-port resource hooks below are stubs. */
static void sc26xx_release_port(struct uart_port *port)
{
}

static int sc26xx_request_port(struct uart_port *port)
{
	return 0;
}

static void sc26xx_config_port(struct uart_port *port, int flags)
{
}

/* Reconfiguration via TIOCSSERIAL is not supported. */
static int sc26xx_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	return -EINVAL;
}
/* uart_ops vtable handed to the serial core for both channels. */
static struct uart_ops sc26xx_ops = {
	.tx_empty	= sc26xx_tx_empty,
	.set_mctrl	= sc26xx_set_mctrl,
	.get_mctrl	= sc26xx_get_mctrl,
	.stop_tx	= sc26xx_stop_tx,
	.start_tx	= sc26xx_start_tx,
	.stop_rx	= sc26xx_stop_rx,
	.enable_ms	= sc26xx_enable_ms,
	.break_ctl	= sc26xx_break_ctl,
	.startup	= sc26xx_startup,
	.shutdown	= sc26xx_shutdown,
	.set_termios	= sc26xx_set_termios,
	.type		= sc26xx_type,
	.release_port	= sc26xx_release_port,
	.request_port	= sc26xx_request_port,
	.config_port	= sc26xx_config_port,
	.verify_port	= sc26xx_verify_port,
};
static struct uart_port *sc26xx_port;
#ifdef CONFIG_SERIAL_SC26XX_CONSOLE
static void sc26xx_console_putchar(struct uart_port *port, char c)
{
	unsigned long flags;
	int budget = 1000000;

	spin_lock_irqsave(&port->lock, flags);
	/* Busy-wait (bounded) for transmitter readiness, then emit the
	 * character.  Give up silently if the chip never becomes ready. */
	for (; budget > 0; budget--) {
		if (READ_SC_PORT(port, SR) & SR_TXRDY) {
			WRITE_SC_PORT(port, THR, c);
			break;
		}
		udelay(2);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}
static void sc26xx_console_write(struct console *con, const char *s, unsigned n)
{
	struct uart_port *port = sc26xx_port;
	unsigned i;

	/* Emit the buffer, expanding "\n" into "\r\n" for the terminal. */
	for (i = 0; i < n; i++, s++) {
		if (*s == '\n')
			sc26xx_console_putchar(port, '\r');
		sc26xx_console_putchar(port, *s);
	}
}
/*
 * Console setup hook: parse any "console=" options (falling back to
 * 9600 8N1 with no flow control) and apply them to the first port.
 * Fails if the port was not successfully probed as an SC26XX.
 */
static int __init sc26xx_console_setup(struct console *con, char *options)
{
	struct uart_port *port = sc26xx_port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (port->type != PORT_SC26XX)
		return -1;

	printk(KERN_INFO "Console: ttySC%d (SC26XX)\n", con->index);
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, con, baud, parity, bits, flow);
}
static struct uart_driver sc26xx_reg;
/* Console descriptor; registered via the uart driver's .cons field. */
static struct console sc26xx_console = {
	.name	= "ttySC",
	.write	= sc26xx_console_write,
	.device	= uart_console_device,
	.setup	= sc26xx_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &sc26xx_reg,
};
#define SC26XX_CONSOLE &sc26xx_console
#else
#define SC26XX_CONSOLE NULL
#endif
/* The serial-core driver object shared by both channels of the DUART. */
static struct uart_driver sc26xx_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "SC26xx",
	.dev_name		= "ttySC",
	.major			= SC26XX_MAJOR,
	.minor			= SC26XX_MINOR_START,
	.nr			= SC26XX_NR,
	.cons			= SC26XX_CONSOLE,
};
/* Decode one 4-bit field of the platform-data word: 0 means "signal not
 * wired", n (1..8) means IPR/OPR bit n-1. */
static u8 sc26xx_flags2mask(unsigned int flags, unsigned int bitpos)
{
	unsigned int nibble = (flags >> bitpos) & 0xf;

	if (!nibble)
		return 0;
	return 1 << (nibble - 1);
}
/*
 * Unpack the board-specific signal routing word @data into the per-line
 * modem-signal bit masks.  Each signal occupies a 4-bit field at a fixed
 * position (DTR at bit 0, RTS at 4, DSR at 8, CTS at 12, DCD at 16,
 * RI at 20); see sc26xx_flags2mask() for the field encoding.
 */
static void __devinit sc26xx_init_masks(struct uart_sc26xx_port *up,
					int line, unsigned int data)
{
	up->dtr_mask[line] = sc26xx_flags2mask(data, 0);
	up->rts_mask[line] = sc26xx_flags2mask(data, 4);
	up->dsr_mask[line] = sc26xx_flags2mask(data, 8);
	up->cts_mask[line] = sc26xx_flags2mask(data, 12);
	up->dcd_mask[line] = sc26xx_flags2mask(data, 16);
	up->ri_mask[line] = sc26xx_flags2mask(data, 20);
}
/*
 * Probe one SC26xx DUART: map its register window, set up both channel
 * uart_port structures from platform data, register the serial driver,
 * add the two ports and claim the shared interrupt line.
 *
 * Fixes over the previous version: the platform_data pointer and the
 * ioremap_nocache() result are now checked before use (either being
 * NULL previously led to a NULL-pointer dereference), and the register
 * mapping is released on the error paths.
 */
static int __devinit sc26xx_probe(struct platform_device *dev)
{
	struct resource *res;
	struct uart_sc26xx_port *up;
	unsigned int *sc26xx_data = dev->dev.platform_data;
	int err;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* The signal-routing table is mandatory for sc26xx_init_masks(). */
	if (!sc26xx_data)
		return -ENODEV;

	up = kzalloc(sizeof *up, GFP_KERNEL);
	if (unlikely(!up))
		return -ENOMEM;

	up->port[0].line = 0;
	up->port[0].ops = &sc26xx_ops;
	up->port[0].type = PORT_SC26XX;
	up->port[0].uartclk = (29491200 / 16); /* arbitrary */

	up->port[0].mapbase = res->start;
	up->port[0].membase = ioremap_nocache(up->port[0].mapbase, 0x40);
	if (!up->port[0].membase) {
		err = -ENOMEM;
		goto out_free_port;
	}
	up->port[0].iotype = UPIO_MEM;
	up->port[0].irq = platform_get_irq(dev, 0);

	up->port[0].dev = &dev->dev;

	sc26xx_init_masks(up, 0, sc26xx_data[0]);

	sc26xx_port = &up->port[0];

	/* Channel B shares the mapping and interrupt with channel A. */
	up->port[1].line = 1;
	up->port[1].ops = &sc26xx_ops;
	up->port[1].type = PORT_SC26XX;
	up->port[1].uartclk = (29491200 / 16); /* arbitrary */

	up->port[1].mapbase = up->port[0].mapbase;
	up->port[1].membase = up->port[0].membase;
	up->port[1].iotype = UPIO_MEM;
	up->port[1].irq = up->port[0].irq;

	up->port[1].dev = &dev->dev;

	sc26xx_init_masks(up, 1, sc26xx_data[1]);

	err = uart_register_driver(&sc26xx_reg);
	if (err)
		goto out_unmap;

	sc26xx_reg.tty_driver->name_base = sc26xx_reg.minor;

	err = uart_add_one_port(&sc26xx_reg, &up->port[0]);
	if (err)
		goto out_unregister_driver;

	err = uart_add_one_port(&sc26xx_reg, &up->port[1]);
	if (err)
		goto out_remove_port0;

	err = request_irq(up->port[0].irq, sc26xx_interrupt, 0, "sc26xx", up);
	if (err)
		goto out_remove_ports;

	dev_set_drvdata(&dev->dev, up);
	return 0;

out_remove_ports:
	uart_remove_one_port(&sc26xx_reg, &up->port[1]);
out_remove_port0:
	uart_remove_one_port(&sc26xx_reg, &up->port[0]);

out_unregister_driver:
	uart_unregister_driver(&sc26xx_reg);

out_unmap:
	iounmap(up->port[0].membase);

out_free_port:
	kfree(up);
	sc26xx_port = NULL;
	return err;
}
/*
 * Undo sc26xx_probe(): release the interrupt, remove both ports,
 * unregister the driver and free the private data.
 *
 * Fixes over the previous version: the function is annotated __devexit
 * (it is referenced through __devexit_p() in the platform_driver, and
 * __exit code is discarded for built-in drivers, which would have left
 * a dangling .remove pointer), and the register mapping created by
 * probe's ioremap_nocache() is now released.
 */
static int __devexit sc26xx_driver_remove(struct platform_device *dev)
{
	struct uart_sc26xx_port *up = dev_get_drvdata(&dev->dev);

	free_irq(up->port[0].irq, up);

	uart_remove_one_port(&sc26xx_reg, &up->port[0]);
	uart_remove_one_port(&sc26xx_reg, &up->port[1]);
	uart_unregister_driver(&sc26xx_reg);

	iounmap(up->port[0].membase);

	kfree(up);
	sc26xx_port = NULL;
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}
static struct platform_driver sc26xx_driver = {
.probe = sc26xx_probe,
.remove = __devexit_p(sc26xx_driver_remove),
.driver = {
.name = "SC26xx",
.owner = THIS_MODULE,
},
};
static int __init sc26xx_init(void)
{
return platform_driver_register(&sc26xx_driver);
}
static void __exit sc26xx_exit(void)
{
platform_driver_unregister(&sc26xx_driver);
}
module_init(sc26xx_init);
module_exit(sc26xx_exit);
MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SC681/SC2692 serial driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:SC26xx");
| gpl-2.0 |
TeamBliss-Devices/android_kernel_lge_hammerhead | drivers/net/ethernet/natsemi/jazzsonic.c | 4983 | 7584 | /*
* jazzsonic.c
*
* (C) 2005 Finn Thain
*
* Converted to DMA API, and (from the mac68k project) introduced
* dhd's support for 16-bit cards.
*
* (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*
* This driver is based on work from Andreas Busse, but most of
* the code is rewritten.
*
* (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
*
* A driver for the onboard Sonic ethernet controller on Mips Jazz
* systems (Acer Pica-61, Mips Magnum 4000, Olivetti M700 and
* perhaps others, too)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
static char jazz_sonic_string[] = "jazzsonic";
#define SONIC_MEM_SIZE 0x100
#include "sonic.h"
/*
* Macros to access SONIC registers
*/
#define SONIC_READ(reg) (*((volatile unsigned int *)dev->base_addr+reg))
#define SONIC_WRITE(reg,val) \
do { \
*((volatile unsigned int *)dev->base_addr+(reg)) = (val); \
} while (0)
/* use 0 for production, 1 for verification, >1 for debug */
#ifdef SONIC_DEBUG
static unsigned int sonic_debug = SONIC_DEBUG;
#else
static unsigned int sonic_debug = 1;
#endif
/*
* We cannot use station (ethernet) address prefixes to detect the
* sonic controller since these are board manufacturer depended.
* So we check for known Silicon Revision IDs instead.
*/
static unsigned short known_revisions[] =
{
0x04, /* Mips Magnum 4000 */
0xffff /* end of list */
};
static int jazzsonic_open(struct net_device *dev)
{
	int err;

	/* Claim the interrupt first; sonic_open() assumes it is wired up. */
	err = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
			  "sonic", dev);
	if (err) {
		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
		       dev->name, dev->irq);
		return err;
	}

	err = sonic_open(dev);
	if (err)
		free_irq(dev->irq, dev);

	return err;
}
static int jazzsonic_close(struct net_device *dev)
{
	/* Stop the chip first, then release the interrupt line. */
	int ret = sonic_close(dev);

	free_irq(dev->irq, dev);
	return ret;
}
/* net_device_ops: open/stop are local wrappers that manage the IRQ;
 * everything else comes straight from the shared sonic core. */
static const struct net_device_ops sonic_netdev_ops = {
	.ndo_open		= jazzsonic_open,
	.ndo_stop		= jazzsonic_close,
	.ndo_start_xmit		= sonic_send_packet,
	.ndo_get_stats		= sonic_get_stats,
	.ndo_set_rx_mode	= sonic_multicast_list,
	.ndo_tx_timeout		= sonic_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
/*
 * Low-level probe: verify a SONIC chip by its silicon revision, read the
 * MAC address from the CAM, allocate the coherent descriptor area and
 * carve it up, then install the net_device_ops.  Expects dev->base_addr
 * to be set by the caller.  Returns 0 on success, -EBUSY if the register
 * region is taken, -ENODEV if no known chip is found, -ENOMEM on
 * descriptor allocation failure.
 */
static int __devinit sonic_probe1(struct net_device *dev)
{
	static unsigned version_printed;
	unsigned int silicon_revision;
	unsigned int val;
	struct sonic_local *lp = netdev_priv(dev);
	int err = -ENODEV;
	int i;

	if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
		return -EBUSY;

	/*
	 * get the Silicon Revision ID. If this is one of the known
	 * one assume that we found a SONIC ethernet controller at
	 * the expected location.
	 */
	silicon_revision = SONIC_READ(SONIC_SR);
	if (sonic_debug > 1)
		printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);

	/* Scan the 0xffff-terminated table of accepted revisions. */
	i = 0;
	while (known_revisions[i] != 0xffff &&
	       known_revisions[i] != silicon_revision)
		i++;

	if (known_revisions[i] == 0xffff) {
		printk("SONIC ethernet controller not found (0x%4x)\n",
		       silicon_revision);
		goto out;
	}

	if (sonic_debug && version_printed++ == 0)
		printk(version);

	printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ",
	       dev_name(lp->device), dev->base_addr);

	/*
	 * Put the sonic into software reset, then
	 * retrieve and print the ethernet address.
	 */
	SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
	SONIC_WRITE(SONIC_CEP,0);
	/* CAM words are 16 bits wide: low byte first, then high byte. */
	for (i=0; i<3; i++) {
		val = SONIC_READ(SONIC_CAP0-i);
		dev->dev_addr[i*2] = val;
		dev->dev_addr[i*2+1] = val >> 8;
	}

	err = -ENOMEM;

	/* Initialize the device structure. */

	lp->dma_bitmode = SONIC_BITMODE32;

	/* Allocate the entire chunk of memory for the descriptors.
           Note that this cannot cross a 64K boundary. */
	if ((lp->descriptors = dma_alloc_coherent(lp->device,
				SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
				&lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
		printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
		       dev_name(lp->device));
		goto out;
	}

	/* Now set up the pointers to point to the appropriate places */
	lp->cda = lp->descriptors;
	lp->tda = lp->cda + (SIZEOF_SONIC_CDA
			     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
			     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
			     * SONIC_BUS_SCALE(lp->dma_bitmode));

	/* Mirror the CPU-side layout above in bus (DMA) addresses. */
	lp->cda_laddr = lp->descriptors_laddr;
	lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
					 * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
					 * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
					 * SONIC_BUS_SCALE(lp->dma_bitmode));

	dev->netdev_ops = &sonic_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * clear tally counter
	 */
	SONIC_WRITE(SONIC_CRCT,0xffff);
	SONIC_WRITE(SONIC_FAET,0xffff);
	SONIC_WRITE(SONIC_MPT,0xffff);

	return 0;
out:
	release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
	return err;
}
/*
* Probe for a SONIC ethernet controller on a Mips Jazz board.
* Actually probing is superfluous but we're paranoid.
*/
/*
 * Platform probe: allocate the net_device, pick up the MMIO base and IRQ
 * from platform resources, run the hardware probe (sonic_probe1) and
 * register the interface.  On failure everything allocated here is
 * released again.
 */
static int __devinit jazz_sonic_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct sonic_local *lp;
	struct resource *res;
	int err = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(struct sonic_local));
	if (!dev)
		return -ENOMEM;

	lp = netdev_priv(dev);
	lp->device = &pdev->dev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	platform_set_drvdata(pdev, dev);

	netdev_boot_setup_check(dev);

	dev->base_addr = res->start;
	dev->irq = platform_get_irq(pdev, 0);

	err = sonic_probe1(dev);
	if (err)
		goto out;
	err = register_netdev(dev);
	if (err)
		goto out1;

	printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);

	return 0;

out1:
	/* sonic_probe1() succeeded, so its mem region must be released. */
	release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
out:
	free_netdev(dev);

	return err;
}
MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
MODULE_ALIAS("platform:jazzsonic");
#include "sonic.c"
/* Platform remove: unregister the interface and free the coherent
 * descriptor area, MMIO region and net_device set up by probe. */
static int __devexit jazz_sonic_device_remove (struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sonic_local* lp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			  lp->descriptors, lp->descriptors_laddr);
	release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
	free_netdev(dev);

	return 0;
}
/* Platform driver glue; the name must match the platform device. */
static struct platform_driver jazz_sonic_driver = {
	.probe	= jazz_sonic_probe,
	.remove	= __devexit_p(jazz_sonic_device_remove),
	.driver	= {
		.name	= jazz_sonic_string,
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(jazz_sonic_driver);
| gpl-2.0 |
flar2/m8-Sense-4.4.4 | drivers/net/ethernet/natsemi/macsonic.c | 4983 | 18638 | /*
* macsonic.c
*
* (C) 2005 Finn Thain
*
* Converted to DMA API, converted to unified driver model, made it work as
* a module again, and from the mac68k project, introduced more 32-bit cards
* and dhd's support for 16-bit cards.
*
* (C) 1998 Alan Cox
*
* Debugging Andreas Ehliar, Michael Schmitz
*
* Based on code
* (C) 1996 by Thomas Bogendoerfer (tsbogend@bigbug.franken.de)
*
* This driver is based on work from Andreas Busse, but most of
* the code is rewritten.
*
* (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
*
* A driver for the Mac onboard Sonic ethernet chip.
*
* 98/12/21 MSch: judged from tests on Q800, it's basically working,
* but eating up both receive and transmit resources
* and duplicating packets. Needs more testing.
*
* 99/01/03 MSch: upgraded to version 0.92 of the core driver, fixed.
*
* 00/10/31 sammy@oh.verio.com: Updated driver for 2.4 kernels, fixed problems
* on centris.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/nubus.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/hwtest.h>
#include <asm/dma.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
static char mac_sonic_string[] = "macsonic";
#include "sonic.h"
/* These should basically be bus-size and endian independent (since
the SONIC is at least smart enough that it uses the same endianness
as the host, unlike certain less enlightened Macintosh NICs) */
#define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \
+ lp->reg_offset))
#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \
+ lp->reg_offset))
/* use 0 for production, 1 for verification, >1 for debug */
#ifdef SONIC_DEBUG
static unsigned int sonic_debug = SONIC_DEBUG;
#else
static unsigned int sonic_debug = 1;
#endif
static int sonic_version_printed;
/* For onboard SONIC */
#define ONBOARD_SONIC_REGISTERS 0x50F0A000
#define ONBOARD_SONIC_PROM_BASE 0x50f08000
enum macsonic_type {
MACSONIC_DUODOCK,
MACSONIC_APPLE,
MACSONIC_APPLE16,
MACSONIC_DAYNA,
MACSONIC_DAYNALINK
};
/* For the built-in SONIC in the Duo Dock */
#define DUODOCK_SONIC_REGISTERS 0xe10000
#define DUODOCK_SONIC_PROM_BASE 0xe12000
/* For Apple-style NuBus SONIC */
#define APPLE_SONIC_REGISTERS 0
#define APPLE_SONIC_PROM_BASE 0x40000
/* Daynalink LC SONIC */
#define DAYNALINK_PROM_BASE 0x400000
/* For Dayna-style NuBus SONIC (haven't seen one yet) */
#define DAYNA_SONIC_REGISTERS 0x180000
/* This is what OpenBSD says. However, this is definitely in NuBus
ROM space so we should be able to get it by walking the NuBus
resource directories */
#define DAYNA_SONIC_MAC_ADDR 0xffe004
#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr)
/*
* For reversing the PROM address
*/
static inline void bit_reverse_addr(unsigned char addr[6])
{
	unsigned char *p;

	/* Reverse the bit order of each of the six octets in place. */
	for (p = addr; p < addr + 6; p++)
		*p = bitrev8(*p);
}
/*
 * Interrupt wrapper that runs sonic_interrupt() with local interrupts
 * masked, so the handler cannot be re-entered when the same SONIC also
 * raises the second (NuBus slot) interrupt level — see the comment in
 * macsonic_open() about the A/UX interrupt scheme.
 */
static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
{
	irqreturn_t result;
	unsigned long flags;

	local_irq_save(flags);
	result = sonic_interrupt(irq, dev_id);
	local_irq_restore(flags);
	return result;
}
/*
 * Bring the interface up: claim the primary IRQ (and, for the A/UX
 * mapping, the secondary NuBus level-2 IRQ with the masking wrapper),
 * then start the chip via sonic_open().  All IRQs are released again
 * on any failure.
 */
static int macsonic_open(struct net_device* dev)
{
	int retval;

	retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
	if (retval) {
		printk(KERN_ERR "%s: unable to get IRQ %d.\n",
				dev->name, dev->irq);
		goto err;
	}
	/* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes
	 * in at priority level 3. However, we sometimes get the level 2 inter-
	 * rupt as well, which must prevent re-entrance of the sonic handler.
	 */
	if (dev->irq == IRQ_AUTO_3) {
		retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
				     "sonic", dev);
		if (retval) {
			printk(KERN_ERR "%s: unable to get IRQ %d.\n",
					dev->name, IRQ_NUBUS_9);
			goto err_irq;
		}
	}
	retval = sonic_open(dev);
	if (retval)
		goto err_irq_nubus;
	return 0;

err_irq_nubus:
	if (dev->irq == IRQ_AUTO_3)
		free_irq(IRQ_NUBUS_9, dev);
err_irq:
	free_irq(dev->irq, dev);
err:
	return retval;
}
static int macsonic_close(struct net_device *dev)
{
	/* Stop the chip first, then release the interrupt line(s). */
	int ret = sonic_close(dev);

	free_irq(dev->irq, dev);
	/* The secondary slot interrupt is only taken in the A/UX case;
	 * see macsonic_open(). */
	if (dev->irq == IRQ_AUTO_3)
		free_irq(IRQ_NUBUS_9, dev);
	return ret;
}
/* net_device_ops: open/stop are local wrappers that manage the IRQ(s);
 * everything else comes straight from the shared sonic core. */
static const struct net_device_ops macsonic_netdev_ops = {
	.ndo_open		= macsonic_open,
	.ndo_stop		= macsonic_close,
	.ndo_start_xmit		= sonic_send_packet,
	.ndo_set_rx_mode	= sonic_multicast_list,
	.ndo_tx_timeout		= sonic_tx_timeout,
	.ndo_get_stats		= sonic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};
/*
 * Shared tail of both probe paths: allocate the coherent descriptor
 * area, carve it into CDA/TDA/RDA/RRA regions (CPU and bus addresses in
 * lockstep), install the net_device_ops and clear the chip's tally
 * counters.  Returns 0 or -ENOMEM.
 */
static int __devinit macsonic_init(struct net_device *dev)
{
	struct sonic_local* lp = netdev_priv(dev);

	/* Allocate the entire chunk of memory for the descriptors.
           Note that this cannot cross a 64K boundary. */
	if ((lp->descriptors = dma_alloc_coherent(lp->device,
	            SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
	            &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
		printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
		       dev_name(lp->device));
		return -ENOMEM;
	}

	/* Now set up the pointers to point to the appropriate places */
	lp->cda = lp->descriptors;
	lp->tda = lp->cda + (SIZEOF_SONIC_CDA
			     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
			     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
			     * SONIC_BUS_SCALE(lp->dma_bitmode));

	/* Bus-address mirror of the layout above. */
	lp->cda_laddr = lp->descriptors_laddr;
	lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
					 * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
					 * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
					 * SONIC_BUS_SCALE(lp->dma_bitmode));

	dev->netdev_ops = &macsonic_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * clear tally counter
	 */
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return 0;
}
#define INVALID_MAC(mac) (memcmp(mac, "\x08\x00\x07", 3) && \
memcmp(mac, "\x00\xA0\x40", 3) && \
memcmp(mac, "\x00\x80\x19", 3) && \
memcmp(mac, "\x00\x05\x02", 3))
/*
 * Determine the MAC address of an onboard/comm-slot SONIC.  Tries, in
 * order: the PROM as-is, the PROM bit-reversed, CAM entry 15 (valid only
 * if MacOS already initialised the card), and finally a random address.
 * Validity is judged by the OUI prefixes in INVALID_MAC().
 */
static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	const int prom_addr = ONBOARD_SONIC_PROM_BASE;
	unsigned short val;

	/*
	 * On NuBus boards we can sometimes look in the ROM resources.
	 * No such luck for comm-slot/onboard.
	 * On the PowerBook 520, the PROM base address is a mystery.
	 */
	if (hwreg_present((void *)prom_addr)) {
		int i;

		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = SONIC_READ_PROM(i);
		if (!INVALID_MAC(dev->dev_addr))
			return;

		/*
		 * Most of the time, the address is bit-reversed. The NetBSD
		 * source has a rather long and detailed historical account of
		 * why this is so.
		 */
		bit_reverse_addr(dev->dev_addr);
		if (!INVALID_MAC(dev->dev_addr))
			return;

		/*
		 * If we still have what seems to be a bogus address, we'll
		 * look in the CAM. The top entry should be ours.
		 */

		printk(KERN_WARNING "macsonic: MAC address in PROM seems "
		                    "to be invalid, trying CAM\n");
	} else {
		printk(KERN_WARNING "macsonic: cannot read MAC address from "
		                    "PROM, trying CAM\n");
	}

	/* This only works if MacOS has already initialized the card. */

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	SONIC_WRITE(SONIC_CEP, 15);

	/* Each CAM word holds two address bytes, high byte first here. */
	val = SONIC_READ(SONIC_CAP2);
	dev->dev_addr[5] = val >> 8;
	dev->dev_addr[4] = val & 0xff;
	val = SONIC_READ(SONIC_CAP1);
	dev->dev_addr[3] = val >> 8;
	dev->dev_addr[2] = val & 0xff;
	val = SONIC_READ(SONIC_CAP0);
	dev->dev_addr[1] = val >> 8;
	dev->dev_addr[0] = val & 0xff;

	if (!INVALID_MAC(dev->dev_addr))
		return;

	/* Still nonsense ... messed up someplace! */

	printk(KERN_WARNING "macsonic: MAC address in CAM entry 15 "
	                    "seems invalid, will use a random MAC\n");
	eth_hw_addr_random(dev);
}
/*
 * Probe the onboard / comm-slot SONIC: detect its presence (with a
 * careful hwreg_present() poke on models where it is optional), work
 * out the register offset and DMA bit mode by reading the silicon
 * revision, reset and quiesce the chip, fetch the MAC address and run
 * the shared macsonic_init().  Returns 0, -ENODEV, or macsonic_init()'s
 * error.
 */
static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
{
	struct sonic_local* lp = netdev_priv(dev);
	int sr;
	int commslot = 0;

	if (!MACH_IS_MAC)
		return -ENODEV;

	printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");

	/* Bogus probing, on the models which may or may not have
	   Ethernet (BTW, the Ethernet *is* always at the same
	   address, and nothing else lives there, at least if Apple's
	   documentation is to be believed) */

	if (macintosh_config->ident == MAC_MODEL_Q630 ||
	    macintosh_config->ident == MAC_MODEL_P588 ||
	    macintosh_config->ident == MAC_MODEL_P575 ||
	    macintosh_config->ident == MAC_MODEL_C610) {
		unsigned long flags;
		int card_present;

		/* Probe with interrupts off: hwreg_present() uses a bus
		 * fault handler that must not be disturbed. */
		local_irq_save(flags);
		card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
		local_irq_restore(flags);

		if (!card_present) {
			printk("none.\n");
			return -ENODEV;
		}
		commslot = 1;
	}

	printk("yes\n");

	/* Danger!  My arms are flailing wildly!  You *must* set lp->reg_offset
	 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
	dev->base_addr = ONBOARD_SONIC_REGISTERS;
	if (via_alt_mapping)
		dev->irq = IRQ_AUTO_3;
	else
		dev->irq = IRQ_NUBUS_9;

	if (!sonic_version_printed) {
		printk(KERN_INFO "%s", version);
		sonic_version_printed = 1;
	}
	printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
	       dev_name(lp->device), dev->base_addr);

	/* The PowerBook's SONIC is 16 bit always. */
	if (macintosh_config->ident == MAC_MODEL_PB520) {
		lp->reg_offset = 0;
		lp->dma_bitmode = SONIC_BITMODE16;
		sr = SONIC_READ(SONIC_SR);
	} else if (commslot) {
		/* Some of the comm-slot cards are 16 bit.  But some
		   of them are not.  The 32-bit cards use offset 2 and
		   have known revisions, we try reading the revision
		   register at offset 2, if we don't get a known revision
		   we assume 16 bit at offset 0.  */
		lp->reg_offset = 2;
		lp->dma_bitmode = SONIC_BITMODE16;

		sr = SONIC_READ(SONIC_SR);
		if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101)
			/* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */
			lp->dma_bitmode = SONIC_BITMODE32;
		else {
			lp->dma_bitmode = SONIC_BITMODE16;
			lp->reg_offset = 0;
			sr = SONIC_READ(SONIC_SR);
		}
	} else {
		/* All onboard cards are at offset 2 with 32 bit DMA. */
		lp->reg_offset = 2;
		lp->dma_bitmode = SONIC_BITMODE32;
		sr = SONIC_READ(SONIC_SR);
	}
	printk(KERN_INFO
	       "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
	       dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset);

#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
	printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
	       SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
#endif

	/* Software reset, then initialize control registers. */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS |
	                       SONIC_DCR_RFT1  | SONIC_DCR_TFT0 |
	                       (lp->dma_bitmode ? SONIC_DCR_DW : 0));

	/* This *must* be written back to in order to restore the
	 * extended programmable output bits, as it may not have been
	 * initialised since the hardware reset. */
	SONIC_WRITE(SONIC_DCR2, 0);

	/* Clear *and* disable interrupts to be on the safe side */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);

	/* Now look for the MAC address. */
	mac_onboard_sonic_ethernet_addr(dev);

	/* Shared init code */
	return macsonic_init(dev);
}
static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
						   unsigned long prom_addr,
						   int id)
{
	unsigned int n;

	for (n = 0; n < 6; n++)
		dev->dev_addr[n] = SONIC_READ_PROM(n);

	/* Every supported card except the Dayna keeps the address
	 * bit-reversed in PROM. */
	if (id != MACSONIC_DAYNA)
		bit_reverse_addr(dev->dev_addr);

	return 0;
}
/*
 * Classify a NuBus ethernet device by its hardware/software resource
 * IDs (plus a board-name match to tell DuoDock apart from other Apple
 * cards).  Returns one of the macsonic_type values, or -1 if the card
 * is not a SONIC variant this driver knows.
 */
static int __devinit macsonic_ident(struct nubus_dev *ndev)
{
	if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
	    ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
		return MACSONIC_DAYNALINK;
	if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
	    ndev->dr_sw == NUBUS_DRSW_APPLE) {
		/* There has to be a better way to do this... */
		if (strstr(ndev->board->name, "DuoDock"))
			return MACSONIC_DUODOCK;
		else
			return MACSONIC_APPLE;
	}

	if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
	    ndev->dr_sw == NUBUS_DRSW_DAYNA)
		return MACSONIC_DAYNA;

	if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
	    ndev->dr_sw == 0) { /* huh? */
		return MACSONIC_APPLE16;
	}
	return -1;
}
/*
 * Probe the next unclaimed NuBus SONIC card: walk the NuBus ethernet
 * devices (tracking visited slots in a static bitmap), pick the
 * per-variant register/PROM layout and DCR settings, reset the chip,
 * read the MAC address from PROM and finish with macsonic_init().
 * Returns 0, -ENODEV if no new card is found, or a downstream error.
 */
static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
{
	static int slots;
	struct nubus_dev* ndev = NULL;
	struct sonic_local* lp = netdev_priv(dev);
	unsigned long base_addr, prom_addr;
	u16 sonic_dcr;
	int id = -1;
	int reg_offset, dma_bitmode;

	/* Find the first SONIC that hasn't been initialized already */
	while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
				       NUBUS_TYPE_ETHERNET, ndev)) != NULL)
	{
		/* Have we seen it already? */
		if (slots & (1<<ndev->board->slot))
			continue;
		slots |= 1<<ndev->board->slot;

		/* Is it one of ours? */
		if ((id = macsonic_ident(ndev)) != -1)
			break;
	}

	if (ndev == NULL)
		return -ENODEV;

	/* Per-variant layout: register window, PROM location and DCR. */
	switch (id) {
	case MACSONIC_DUODOCK:
		base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS;
		prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE;
		sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 |
		            SONIC_DCR_TFT0;
		reg_offset = 2;
		dma_bitmode = SONIC_BITMODE32;
		break;
	case MACSONIC_APPLE:
		base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
		prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
		sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE32;
		break;
	case MACSONIC_APPLE16:
		base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
		prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
		sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
		            SONIC_DCR_PO1 | SONIC_DCR_BMS;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE16;
		break;
	case MACSONIC_DAYNALINK:
		base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
		prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE;
		sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
		            SONIC_DCR_PO1 | SONIC_DCR_BMS;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE16;
		break;
	case MACSONIC_DAYNA:
		base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS;
		prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR;
		sonic_dcr = SONIC_DCR_BMS |
		            SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
		reg_offset = 0;
		dma_bitmode = SONIC_BITMODE16;
		break;
	default:
		printk(KERN_ERR "macsonic: WTF, id is %d\n", id);
		return -ENODEV;
	}

	/* Danger!  My arms are flailing wildly!  You *must* set lp->reg_offset
	 * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
	dev->base_addr = base_addr;
	lp->reg_offset = reg_offset;
	lp->dma_bitmode = dma_bitmode;
	dev->irq = SLOT2IRQ(ndev->board->slot);

	if (!sonic_version_printed) {
		printk(KERN_INFO "%s", version);
		sonic_version_printed = 1;
	}
	printk(KERN_INFO "%s: %s in slot %X\n",
	       dev_name(lp->device), ndev->board->name, ndev->board->slot);
	printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
	       dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset);

#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
	printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
	       SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
#endif

	/* Software reset, then initialize control registers. */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0));
	/* This *must* be written back to in order to restore the
	 * extended programmable output bits, since it may not have been
	 * initialised since the hardware reset. */
	SONIC_WRITE(SONIC_DCR2, 0);

	/* Clear *and* disable interrupts to be on the safe side */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);

	/* Now look for the MAC address. */
	if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0)
		return -ENODEV;

	/* Shared init code */
	return macsonic_init(dev);
}
/*
 * Platform probe: allocate the net_device, then try the onboard probe
 * first and fall back to the NuBus probe only when the onboard one
 * reports -ENODEV (other errors are fatal).  Finishes by registering
 * the interface.
 */
static int __devinit mac_sonic_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct sonic_local *lp;
	int err;

	dev = alloc_etherdev(sizeof(struct sonic_local));
	if (!dev)
		return -ENOMEM;

	lp = netdev_priv(dev);
	lp->device = &pdev->dev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	platform_set_drvdata(pdev, dev);

	/* This will catch fatal stuff like -ENOMEM as well as success */
	err = mac_onboard_sonic_probe(dev);
	if (err == 0)
		goto found;
	if (err != -ENODEV)
		goto out;
	err = mac_nubus_sonic_probe(dev);
	if (err)
		goto out;
found:
	err = register_netdev(dev);
	if (err)
		goto out;

	printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);

	return 0;

out:
	free_netdev(dev);

	return err;
}
MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
MODULE_ALIAS("platform:macsonic");
#include "sonic.c"
/* Platform remove: unregister the interface and free the coherent
 * descriptor area and net_device allocated during probe. */
static int __devexit mac_sonic_device_remove (struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sonic_local* lp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
	                  lp->descriptors, lp->descriptors_laddr);
	free_netdev(dev);

	return 0;
}
/* Platform-driver glue: binds by name to the platform device
 * registered by the Mac platform setup code. */
static struct platform_driver mac_sonic_driver = {
        .probe  = mac_sonic_probe,
        .remove = __devexit_p(mac_sonic_device_remove),
        .driver = {
                .name   = mac_sonic_string,
                .owner  = THIS_MODULE,
        },
};

module_platform_driver(mac_sonic_driver);
| gpl-2.0 |
Mirenk/android_kernel_semc_msm7x30 | drivers/net/ethernet/mellanox/mlx4/qp.c | 4983 | 13812 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include "mlx4.h"
#include "icm.h"
/*
 * Dispatch an asynchronous hardware event to the QP it refers to.
 *
 * The QP is looked up in the radix tree under qp_table->lock and a
 * temporary reference is taken so the QP cannot be freed while its
 * event handler runs; the reference is dropped afterwards, completing
 * qp->free if it was the last one (see mlx4_qp_free()).
 *
 * Fix: corrected the grammar of the debug message ("none existent"
 * -> "nonexistent").
 */
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
        struct mlx4_qp *qp;

        spin_lock(&qp_table->lock);

        qp = __mlx4_qp_lookup(dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);

        spin_unlock(&qp_table->lock);

        if (!qp) {
                mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
                return;
        }

        qp->event(qp, event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}
/* True when @qp is one of the two special QP0s, i.e. its number is
 * exactly sqp_start or sqp_start + 1. */
static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
        return qp->qpn == dev->caps.sqp_start ||
               qp->qpn == dev->caps.sqp_start + 1;
}
/*
 * Core QP state-transition helper shared by the native and wrapped
 * entry points.
 *
 * @cur_state/@new_state index a static table of firmware opcodes; a
 * zero entry means the transition is illegal and -EINVAL is returned.
 * Transitions to RESET need no context mailbox.  For every other
 * transition a mailbox is built holding the optional-parameter mask
 * (bytes 0..3) followed by the QP context (from byte 8).
 * @native selects whether the command bypasses the mfunc wrapper.
 */
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context,
                   enum mlx4_qp_optpar optpar,
                   int sqd_event, struct mlx4_qp *qp, int native)
{
        /* op[cur][new]: firmware command for that transition; 0 = illegal. */
        static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
                [MLX4_QP_STATE_RST] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_INIT]    = MLX4_CMD_RST2INIT_QP,
                },
                [MLX4_QP_STATE_INIT]  = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_INIT]    = MLX4_CMD_INIT2INIT_QP,
                        [MLX4_QP_STATE_RTR]     = MLX4_CMD_INIT2RTR_QP,
                },
                [MLX4_QP_STATE_RTR]   = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTR2RTS_QP,
                },
                [MLX4_QP_STATE_RTS]   = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTS2RTS_QP,
                        [MLX4_QP_STATE_SQD]     = MLX4_CMD_RTS2SQD_QP,
                },
                [MLX4_QP_STATE_SQD] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQD2RTS_QP,
                        [MLX4_QP_STATE_SQD]     = MLX4_CMD_SQD2SQD_QP,
                },
                [MLX4_QP_STATE_SQER] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQERR2RTS_QP,
                },
                [MLX4_QP_STATE_ERR] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                }
        };

        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int ret = 0;
        u8 port;

        if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
            !op[cur_state][new_state])
                return -EINVAL;

        /* Transition to RESET is mailbox-less.  On the master, clear
         * the per-port qp0_active flag when a QP0 leaves an active
         * state. */
        if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
                ret = mlx4_cmd(dev, 0, qp->qpn, 2,
                               MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
                if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
                    cur_state != MLX4_QP_STATE_RST &&
                    is_qp0(dev, qp)) {
                        /* Low bit of the QP0 number selects the port (1 or 2). */
                        port = (qp->qpn & 1) + 1;
                        priv->mfunc.master.qp0_state[port].qp0_active = 0;
                }
                return ret;
        }

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        /* Leaving RESET: program the MTT base address and page size
         * into the context before it is handed to firmware. */
        if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
                u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
                context->mtt_base_addr_h = mtt_addr >> 32;
                context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
                context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
        }

        /* Mailbox layout: optpar mask at offset 0, QP context at offset 8. */
        *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
        memcpy(mailbox->buf + 8, context, sizeof *context);

        ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
                cpu_to_be32(qp->qpn);

        /* Bit 31 of the input modifier requests an SQ-drained event. */
        ret = mlx4_cmd(dev, mailbox->dma,
                       qp->qpn | (!!sqd_event << 31),
                       new_state == MLX4_QP_STATE_RST ? 2 : 0,
                       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
}
/*
 * Public QP state-transition entry point; uses the wrapped (non-native,
 * native = 0) command path.  See __mlx4_qp_modify() for the table of
 * legal transitions and the mailbox layout.
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context,
                   enum mlx4_qp_optpar optpar,
                   int sqd_event, struct mlx4_qp *qp)
{
        return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
                                optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
*base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
if (*base == -1)
return -ENOMEM;
return 0;
}
/* Reserve a range of QP numbers.  Single-function devices allocate
 * straight from the local bitmap; multi-function devices forward the
 * request to the resource owner through the command interface. */
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
{
        u64 in_param;
        u64 out_param;
        int err;

        if (!mlx4_is_mfunc(dev))
                return __mlx4_qp_reserve_range(dev, cnt, align, base);

        /* Pack count (low dword) and alignment (high dword). */
        set_param_l(&in_param, cnt);
        set_param_h(&in_param, align);
        err = mlx4_cmd_imm(dev, in_param, &out_param,
                           RES_QP, RES_OP_RESERVE,
                           MLX4_CMD_ALLOC_RES,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
        if (err)
                return err;

        *base = get_param_l(&out_param);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
/* Release a previously reserved QP range.  Multi-function devices
 * forward the request to the resource owner; a failure there is only
 * logged since there is nothing the caller can do about it. */
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
        u64 in_param;

        if (!mlx4_is_mfunc(dev)) {
                __mlx4_qp_release_range(dev, base_qpn, cnt);
                return;
        }

        set_param_l(&in_param, base_qpn);
        set_param_h(&in_param, cnt);
        if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
                     MLX4_CMD_FREE_RES,
                     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
                mlx4_warn(dev, "Failed to release qp range"
                          " base:%d cnt:%d\n", base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
/*
 * Pin all ICM table chunks a QP needs: the QP context, the extended
 * (auxiliary) context, the alternate-path context, the RDMA-read
 * context and the cMPT entry.  On failure, every table acquired so
 * far is released in reverse order through the unwind ladder below.
 */
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
        int err;

        err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
        if (err)
                goto err_out;

        err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
        if (err)
                goto err_put_qp;

        err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
        if (err)
                goto err_put_auxc;

        err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
        if (err)
                goto err_put_altc;

        err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
        if (err)
                goto err_put_rdmarc;

        return 0;

/* Unwind: releases mirror the gets above, newest first. */
err_put_rdmarc:
        mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
        mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
        mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
        mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
        return err;
}
/* Pin the ICM backing for @qpn.  Multi-function devices delegate to
 * the resource owner via the command interface; otherwise the tables
 * are taken locally. */
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
        u64 param;

        if (!mlx4_is_mfunc(dev))
                return __mlx4_qp_alloc_icm(dev, qpn);

        set_param_l(&param, qpn);
        return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
                            MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
                            MLX4_CMD_WRAPPED);
}
/* Release the ICM references taken by __mlx4_qp_alloc_icm(), in the
 * reverse of the acquisition order. */
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;

        mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
        mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
        mlx4_table_put(dev, &qp_table->altc_table, qpn);
        mlx4_table_put(dev, &qp_table->auxc_table, qpn);
        mlx4_table_put(dev, &qp_table->qp_table, qpn);
}
/* Release the ICM backing for @qpn; mfunc devices ask the resource
 * owner to do it, and a failure there is only logged. */
static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
        u64 in_param;

        if (!mlx4_is_mfunc(dev)) {
                __mlx4_qp_free_icm(dev, qpn);
                return;
        }

        set_param_l(&in_param, qpn);
        if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
                     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
                     MLX4_CMD_WRAPPED))
                mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
}
/*
 * Attach software state to an already-reserved QP number: pin the ICM
 * backing, insert the QP into the radix tree (keyed by qpn masked
 * with num_qps - 1) used by the async-event dispatcher, and set up
 * the refcount/completion pair used for teardown.  qpn == 0 is
 * rejected.  Returns 0 or a negative error; on radix-tree failure the
 * ICM backing is released again.
 */
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
        int err;

        if (!qpn)
                return -EINVAL;

        qp->qpn = qpn;

        err = mlx4_qp_alloc_icm(dev, qpn);
        if (err)
                return err;

        spin_lock_irq(&qp_table->lock);
        err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
                                (dev->caps.num_qps - 1), qp);
        spin_unlock_irq(&qp_table->lock);
        if (err)
                goto err_icm;

        /* One reference for the owner; events take transient extras. */
        atomic_set(&qp->refcount, 1);
        init_completion(&qp->free);

        return 0;

err_icm:
        mlx4_qp_free_icm(dev, qpn);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
/* Unhook the QP from the radix tree so no further async events can
 * find it.  The QP itself stays valid until mlx4_qp_free(). */
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
        struct mlx4_qp_table *table = &mlx4_priv(dev)->qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&dev->qp_table_tree,
                          qp->qpn & (dev->caps.num_qps - 1));
        spin_unlock_irqrestore(&table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);
/*
 * Final teardown of a QP: drop the owner's reference, wait until any
 * in-flight event handlers (which hold transient references) have
 * finished — the last put completes qp->free — then release the ICM
 * backing.  NOTE(review): callers are presumably expected to call
 * mlx4_qp_remove() first so no new events can find the QP; that is
 * not enforced here.
 */
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
        wait_for_completion(&qp->free);

        mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
/* Tell firmware where the block of special QPs begins; cleanup passes
 * base_qpn == 0. */
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
        return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
/*
 * One-time QP-table initialisation: set up the lookup structures,
 * lay out the reserved QP regions at the top of the QP range, create
 * the allocation bitmap and tell firmware where the special QPs live.
 * Slaves only initialise the lookup structures; the master owns the
 * bitmap and the special-QP configuration.
 */
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
        int err;
        int reserved_from_top = 0;

        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
        if (mlx4_is_slave(dev))
                return 0;

        /*
         * We reserve 2 extra QPs per port for the special QPs.  The
         * block of special QPs must be aligned to a multiple of 8, so
         * round up.
         *
         * We also reserve the MSB of the 24-bit QP number to indicate
         * that a QP is an XRC QP.
         */
        dev->caps.sqp_start =
                ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);

        {
                int sort[MLX4_NUM_QP_REGION];
                int i, j, tmp;
                int last_base = dev->caps.num_qps;

                /* Bubble-sort region indices 1..N-1 by descending
                 * reserved count.  Index 0 (the FW region) is handled
                 * separately above; sort[0] is never read. */
                for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
                        sort[i] = i;

                for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
                        for (j = 2; j < i; ++j) {
                                if (dev->caps.reserved_qps_cnt[sort[j]] >
                                    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
                                        tmp             = sort[j];
                                        sort[j]         = sort[j - 1];
                                        sort[j - 1]     = tmp;
                                }
                        }
                }

                /* Carve the sorted regions off the top of the QP range,
                 * largest first, recording each region's base. */
                for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
                        last_base -= dev->caps.reserved_qps_cnt[sort[i]];
                        dev->caps.reserved_qps_base[sort[i]] = last_base;
                        reserved_from_top +=
                                dev->caps.reserved_qps_cnt[sort[i]];
                }
        }

        /* Bitmap over the 24-bit QPN space; FW region plus the 8
         * special QPs are reserved at the bottom, the sorted regions
         * at the top. */
        err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
                               (1 << 23) - 1, dev->caps.sqp_start + 8,
                               reserved_from_top);
        if (err)
                return err;

        return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
}
/* Tear down what mlx4_init_qp_table() created on the master: disable
 * the special-QP block (base 0) and destroy the allocation bitmap.
 * Slaves never owned either, so they return immediately. */
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
        if (mlx4_is_slave(dev))
                return;
        mlx4_CONF_SPECIAL_QP(dev, 0);
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
/* Read the current hardware context of @qp into @context via the
 * QUERY_QP command.  The context sits at offset 8 of the returned
 * mailbox.  Returns 0 or a negative error. */
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
                  struct mlx4_qp_context *context)
{
        struct mlx4_cmd_mailbox *outbox;
        int err;

        outbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(outbox))
                return PTR_ERR(outbox);

        err = mlx4_cmd_box(dev, 0, outbox->dma, qp->qpn, 0,
                           MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
                           MLX4_CMD_WRAPPED);
        if (!err)
                memcpy(context, outbox->buf + 8, sizeof *context);

        mlx4_free_cmd_mailbox(dev, outbox);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_qp_context *context,
struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
int err;
int i;
enum mlx4_qp_state states[] = {
MLX4_QP_STATE_RST,
MLX4_QP_STATE_INIT,
MLX4_QP_STATE_RTR,
MLX4_QP_STATE_RTS
};
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
mlx4_err(dev, "Failed to bring QP to state: "
"%d with error: %d\n",
states[i + 1], err);
return err;
}
*qp_state = states[i + 1];
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
| gpl-2.0 |
AOKP/kernel_sony_msm8974ab | drivers/video/backlight/vgg2432a4.c | 4983 | 6383 | /* drivers/video/backlight/vgg2432a4.c
*
* VGG2432A4 (ILI9320) LCD controller driver.
*
* Copyright 2007 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <video/ili9320.h>
#include "ili9320.h"
/* Device initialisation sequences */

/* Step 1 of the power-on sequence: all power-control registers are
 * first programmed to their zero/"off" values before the supplies are
 * ramped up by vgg_init2 below. */
static struct ili9320_reg vgg_init1[] = {
        {
                .address = ILI9320_POWER1,
                .value   = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
        }, {
                .address = ILI9320_POWER2,
                .value   = (ILI9320_POWER2_VC(7) |
                            ILI9320_POWER2_DC0(0) | ILI9320_POWER2_DC1(0)),
        }, {
                .address = ILI9320_POWER3,
                .value   = ILI9320_POWER3_VRH(0),
        }, {
                .address = ILI9320_POWER4,
                .value   = ILI9320_POWER4_VREOUT(0),
        },
};
/* Step 2 of the power-on sequence: enable the step-up circuits and
 * amplifiers after the delay in vgg2432a4_lcd_init(). */
static struct ili9320_reg vgg_init2[] = {
        {
                .address = ILI9320_POWER1,
                .value   = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
                            ILI9320_POWER1_BT(7) | ILI9320_POWER1_SAP),
        }, {
                .address = ILI9320_POWER2,
                .value   = ILI9320_POWER2_VC(7) | ILI9320_POWER2_DC0(3),
        }
};
/* Panel gamma-correction curve (controller gamma registers 1-10).
 * Values are raw register contents for this specific panel; 0x807 is
 * the same value as the 0x0807 spelling used elsewhere. */
static struct ili9320_reg vgg_gamma[] = {
        {
                .address = ILI9320_GAMMA1,
                .value   = 0x0000,
        }, {
                .address = ILI9320_GAMMA2,
                .value   = 0x0505,
        }, {
                .address = ILI9320_GAMMA3,
                .value   = 0x0004,
        }, {
                .address = ILI9320_GAMMA4,
                .value   = 0x0006,
        }, {
                .address = ILI9320_GAMMA5,
                .value   = 0x0707,
        }, {
                .address = ILI9320_GAMMA6,
                .value   = 0x0105,
        }, {
                .address = ILI9320_GAMMA7,
                .value   = 0x0002,
        }, {
                .address = ILI9320_GAMMA8,
                .value   = 0x0707,
        }, {
                .address = ILI9320_GAMMA9,
                .value   = 0x0704,
        }, {
                .address = ILI9320_GAMMA10,
                .value   = 0x807,
        }
};
/* Basic controller setup written first: scan direction, drive
 * waveform, entry mode (BGR ordering) and resizing disabled. */
static struct ili9320_reg vgg_init0[] = {
        [0]     = {
                /* set direction and scan mode gate */
                .address = ILI9320_DRIVER,
                .value   = ILI9320_DRIVER_SS,
        }, {
                .address = ILI9320_DRIVEWAVE,
                .value   = (ILI9320_DRIVEWAVE_MUSTSET |
                            ILI9320_DRIVEWAVE_EOR | ILI9320_DRIVEWAVE_BC),
        }, {
                .address = ILI9320_ENTRYMODE,
                .value   = ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR,
        }, {
                .address = ILI9320_RESIZING,
                .value   = 0x0,
        },
};
/*
 * Full panel bring-up sequence for the VGG2432A4: VCore and
 * oscillator start, basic setup (vgg_init0), platform-supplied
 * display/interface registers, staged power-on (vgg_init1/vgg_init2
 * with settle delays), gamma programming, window/driving setup and
 * finally display-on.  Returns 0 on success or the first write error.
 * The error labels all fall through to a plain return; no hardware
 * unwinding is attempted.
 */
static int vgg2432a4_lcd_init(struct ili9320 *lcd,
                              struct ili9320_platdata *cfg)
{
        unsigned int addr;
        int ret;

        /* Set VCore before anything else (VGG243237-6UFLWA) */
        ret = ili9320_write(lcd, 0x00e5, 0x8000);
        if (ret)
                goto err_initial;

        /* Start the oscillator up before we can do anything else. */
        ret = ili9320_write(lcd, ILI9320_OSCILATION, ILI9320_OSCILATION_OSC);
        if (ret)
                goto err_initial;

        /* must wait at least 10ms after starting the oscillator */
        mdelay(15);

        ret = ili9320_write_regs(lcd, vgg_init0, ARRAY_SIZE(vgg_init0));
        if (ret != 0)
                goto err_initial;

        /* Display/interface configuration comes from platform data. */
        ili9320_write(lcd, ILI9320_DISPLAY2, cfg->display2);
        ili9320_write(lcd, ILI9320_DISPLAY3, cfg->display3);
        ili9320_write(lcd, ILI9320_DISPLAY4, cfg->display4);

        ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
        ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
        ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2);

        /* Staged power-on with settle delays between the steps. */
        ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
        if (ret != 0)
                goto err_vgg;

        mdelay(300);

        ret = ili9320_write_regs(lcd, vgg_init2, ARRAY_SIZE(vgg_init2));
        if (ret != 0)
                goto err_vgg2;

        mdelay(100);

        ili9320_write(lcd, ILI9320_POWER3, 0x13c);

        mdelay(100);

        ili9320_write(lcd, ILI9320_POWER4, 0x1c00);
        ili9320_write(lcd, ILI9320_POWER7, 0x000e);

        mdelay(100);

        /* Reset the GRAM address counters to the origin. */
        ili9320_write(lcd, ILI9320_GRAM_HORIZ_ADDR, 0x00);
        ili9320_write(lcd, ILI9320_GRAM_VERT_ADD, 0x00);

        ret = ili9320_write_regs(lcd, vgg_gamma, ARRAY_SIZE(vgg_gamma));
        if (ret != 0)
                goto err_vgg3;

        /* Address window covers the full panel. */
        ili9320_write(lcd, ILI9320_HORIZ_START, 0x0);
        ili9320_write(lcd, ILI9320_HORIZ_END, cfg->hsize - 1);
        ili9320_write(lcd, ILI9320_VERT_START, 0x0);
        ili9320_write(lcd, ILI9320_VERT_END, cfg->vsize - 1);

        /* Gate-line count derived from the panel height. */
        ili9320_write(lcd, ILI9320_DRIVER2,
                      ILI9320_DRIVER2_NL(((cfg->vsize - 240) / 8) + 0x1D));

        ili9320_write(lcd, ILI9320_BASE_IMAGE, 0x1);
        ili9320_write(lcd, ILI9320_VERT_SCROLL, 0x00);

        /* Clear all partial-display registers. */
        for (addr = ILI9320_PARTIAL1_POSITION; addr <= ILI9320_PARTIAL2_END;
             addr++) {
                ili9320_write(lcd, addr, 0x0);
        }

        ili9320_write(lcd, ILI9320_INTERFACE1, 0x10);
        ili9320_write(lcd, ILI9320_INTERFACE2, cfg->interface2);
        ili9320_write(lcd, ILI9320_INTERFACE3, cfg->interface3);
        ili9320_write(lcd, ILI9320_INTERFACE4, cfg->interface4);
        ili9320_write(lcd, ILI9320_INTERFACE5, cfg->interface5);
        ili9320_write(lcd, ILI9320_INTERFACE6, cfg->interface6);

        /* Cache the display-on value for later suspend/resume use. */
        lcd->display1 = (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_DTE |
                         ILI9320_DISPLAY1_GON | ILI9320_DISPLAY1_BASEE |
                         0x40);

        ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);

        return 0;

 err_vgg3:
 err_vgg2:
 err_vgg:
 err_initial:
        return ret;
}
#ifdef CONFIG_PM
/* PM hooks delegate to the shared ili9320 core, using the driver data
 * attached to the SPI device (presumably set by the ili9320 core
 * during probe — confirm against ili9320.c). */
static int vgg2432a4_suspend(struct spi_device *spi, pm_message_t state)
{
        return ili9320_suspend(dev_get_drvdata(&spi->dev), state);
}
static int vgg2432a4_resume(struct spi_device *spi)
{
        return ili9320_resume(dev_get_drvdata(&spi->dev));
}
#else
/* Without CONFIG_PM the spi_driver PM slots are left unpopulated. */
#define vgg2432a4_suspend NULL
#define vgg2432a4_resume NULL
#endif
/* Panel description handed to the generic ili9320 core; the core
 * calls .init during probe to run the panel-specific bring-up. */
static struct ili9320_client vgg2432a4_client = {
        .name   = "VGG2432A4",
        .init   = vgg2432a4_lcd_init,
};
/* Device probe: hand the SPI device to the generic ili9320 core
 * together with our panel description; log on failure. */
static int __devinit vgg2432a4_probe(struct spi_device *spi)
{
        int rc = ili9320_probe_spi(spi, &vgg2432a4_client);

        if (rc != 0)
                dev_err(&spi->dev, "failed to initialise ili9320\n");

        return rc;
}
/* Removal: let the ili9320 core tear down the state it created. */
static int __devexit vgg2432a4_remove(struct spi_device *spi)
{
        return ili9320_remove(dev_get_drvdata(&spi->dev));
}
/* System shutdown: power the panel down via the ili9320 core. */
static void vgg2432a4_shutdown(struct spi_device *spi)
{
        ili9320_shutdown(dev_get_drvdata(&spi->dev));
}
/* SPI driver glue; the modalias "spi:VGG2432A4" below matches the
 * board-declared SPI device name. */
static struct spi_driver vgg2432a4_driver = {
        .driver = {
                .name           = "VGG2432A4",
                .owner          = THIS_MODULE,
        },
        .probe          = vgg2432a4_probe,
        .remove         = __devexit_p(vgg2432a4_remove),
        .shutdown       = vgg2432a4_shutdown,
        .suspend        = vgg2432a4_suspend,
        .resume         = vgg2432a4_resume,
};

module_spi_driver(vgg2432a4_driver);

MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_DESCRIPTION("VGG2432A4 LCD Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:VGG2432A4");
| gpl-2.0 |
hiikezoe/android_kernel_panasonic_p02e | drivers/media/video/pvrusb2/pvrusb2-i2c-core.c | 5239 | 20177 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include <media/ir-kbd-i2c.h>
#include "pvrusb2-i2c-core.h"
#include "pvrusb2-hdw-internal.h"
#include "pvrusb2-debug.h"
#include "pvrusb2-fx2-cmd.h"
#include "pvrusb2.h"
#define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__)
/*
This module attempts to implement a compliant I2C adapter for the pvrusb2
device.
*/
/* Nonzero: probe all 128 I2C addresses at attach time and log responders. */
static unsigned int i2c_scan;
module_param(i2c_scan, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");

/* Per-unit IR mode, default 1 (normal reception) for each of PVR_NUM units. */
static int ir_mode[PVR_NUM] = { [0 ... PVR_NUM-1] = 1 };
module_param_array(ir_mode, int, NULL, 0444);
MODULE_PARM_DESC(ir_mode,"specify: 0=disable IR reception, 1=normal IR");

/* Nonzero suppresses automatic binding of the ir_video IR receiver. */
static int pvr2_disable_ir_video;
module_param_named(disable_autoload_ir_video, pvr2_disable_ir_video,
                   int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(disable_autoload_ir_video,
                 "1=do not try to autoload ir_video IR receiver");
/*
 * Issue a raw I2C write through the FX2 firmware.
 *
 * Builds a command buffer of the form
 *   [FX2CMD_I2C_WRITE, chip address, payload length, payload...]
 * under ctl_lock and sends it with pvr2_send_request().  A single
 * status byte comes back: 8 means success, 7 is a quiet failure (no
 * trace), anything else is unexpected and logged.  Writes too large
 * for the command buffer are rejected with -ENOTSUPP.
 */
static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
                          u8 i2c_addr,      /* I2C address we're talking to */
                          u8 *data,         /* Data to write */
                          u16 length)       /* Size of data to write */
{
        /* Return value - default 0 means success */
        int ret;

        /* A NULL buffer means a zero-length (address-only) write. */
        if (!data) length = 0;
        if (length > (sizeof(hdw->cmd_buffer) - 3)) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
                           "Killing an I2C write to %u that is too large"
                           " (desired=%u limit=%u)",
                           i2c_addr,
                           length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3));
                return -ENOTSUPP;
        }

        LOCK_TAKE(hdw->ctl_lock);

        /* Clear the command buffer (likely to be paranoia) */
        memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));

        /* Set up command buffer for an I2C write */
        hdw->cmd_buffer[0] = FX2CMD_I2C_WRITE;      /* write prefix */
        hdw->cmd_buffer[1] = i2c_addr;  /* i2c addr of chip */
        hdw->cmd_buffer[2] = length;    /* length of what follows */
        if (length) memcpy(hdw->cmd_buffer + 3, data, length);

        /* Do the operation */
        ret = pvr2_send_request(hdw,
                                hdw->cmd_buffer,
                                length + 3,
                                hdw->cmd_buffer,
                                1);
        if (!ret) {
                /* Status byte 8 = OK; 7 fails quietly, anything else
                   is logged as unexpected. */
                if (hdw->cmd_buffer[0] != 8) {
                        ret = -EIO;
                        if (hdw->cmd_buffer[0] != 7) {
                                trace_i2c("unexpected status"
                                          " from i2_write[%d]: %d",
                                          i2c_addr,hdw->cmd_buffer[0]);
                        }
                }
        }

        LOCK_GIVE(hdw->ctl_lock);

        return ret;
}
/*
 * Issue a combined I2C write-then-read through the FX2 firmware.
 *
 * Command buffer layout:
 *   [FX2CMD_I2C_READ, write length, read length, chip address, write data...]
 * The firmware replies with one status byte (8 = OK, 7 = quiet
 * failure) followed by the read data, which is copied into @res.
 * On error @res is zero-filled so callers never see stale data.
 * Oversized write or read lengths are rejected with -ENOTSUPP.
 */
static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
                         u8 i2c_addr,       /* I2C address we're talking to */
                         u8 *data,          /* Data to write */
                         u16 dlen,          /* Size of data to write */
                         u8 *res,           /* Where to put data we read */
                         u16 rlen)          /* Amount of data to read */
{
        /* Return value - default 0 means success */
        int ret;

        /* A NULL write buffer means no write phase. */
        if (!data) dlen = 0;
        if (dlen > (sizeof(hdw->cmd_buffer) - 4)) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
                           "Killing an I2C read to %u that has wlen too large"
                           " (desired=%u limit=%u)",
                           i2c_addr,
                           dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4));
                return -ENOTSUPP;
        }
        if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
                           "Killing an I2C read to %u that has rlen too large"
                           " (desired=%u limit=%u)",
                           i2c_addr,
                           rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1));
                return -ENOTSUPP;
        }

        LOCK_TAKE(hdw->ctl_lock);

        /* Clear the command buffer (likely to be paranoia) */
        memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));

        /* Set up command buffer for an I2C write followed by a read */
        hdw->cmd_buffer[0] = FX2CMD_I2C_READ;  /* read prefix */
        hdw->cmd_buffer[1] = dlen;  /* arg length */
        hdw->cmd_buffer[2] = rlen;  /* answer length. Device will send one
                                       more byte (status). */
        hdw->cmd_buffer[3] = i2c_addr;  /* i2c addr of chip */
        if (dlen) memcpy(hdw->cmd_buffer + 4, data, dlen);

        /* Do the operation */
        ret = pvr2_send_request(hdw,
                                hdw->cmd_buffer,
                                4 + dlen,
                                hdw->cmd_buffer,
                                rlen + 1);
        if (!ret) {
                /* Status byte 8 = OK; 7 fails quietly, anything else
                   is logged as unexpected. */
                if (hdw->cmd_buffer[0] != 8) {
                        ret = -EIO;
                        if (hdw->cmd_buffer[0] != 7) {
                                trace_i2c("unexpected status"
                                          " from i2_read[%d]: %d",
                                          i2c_addr,hdw->cmd_buffer[0]);
                        }
                }
        }

        /* Copy back the result */
        if (res && rlen) {
                if (ret) {
                        /* Error, just blank out the return buffer */
                        memset(res, 0, rlen);
                } else {
                        memcpy(res, hdw->cmd_buffer + 1, rlen);
                }
        }

        LOCK_GIVE(hdw->ctl_lock);

        return ret;
}
/* Common low-level entry point for I2C operations to the hardware.
   NULL buffers count as zero-length transfers.  A pure write (no
   read phase, non-empty write data) uses the write command; every
   other combination — including a zero/zero probe — goes through the
   combined write-then-read command. */
static int pvr2_i2c_basic_op(struct pvr2_hdw *hdw,
                             u8 i2c_addr,
                             u8 *wdata,
                             u16 wlen,
                             u8 *rdata,
                             u16 rlen)
{
        if (rdata == NULL)
                rlen = 0;
        if (wdata == NULL)
                wlen = 0;

        if (!rlen && wlen)
                return pvr2_i2c_write(hdw, i2c_addr, wdata, wlen);

        return pvr2_i2c_read(hdw, i2c_addr, wdata, wlen, rdata, rlen);
}
/* This is a special entry point for cases of I2C transaction attempts to
   the IR receiver.  The implementation here simulates the IR receiver by
   issuing a command to the FX2 firmware and using that response to return
   what the real I2C receiver would have returned.  We use this for 24xxx
   devices, where the IR receiver chip has been removed and replaced with
   FX2 related logic. */
static int i2c_24xxx_ir(struct pvr2_hdw *hdw,
                        u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
{
        u8 dat[4];
        unsigned int stat;

        if (!(rlen || wlen)) {
                /* This is a probe attempt.  Just let it succeed. */
                return 0;
        }

        /* We don't understand this kind of transaction */
        if ((wlen != 0) || (rlen == 0)) return -EIO;

        if (rlen < 3) {
                /* Mike Isely <isely@pobox.com> Appears to be a probe
                   attempt from lirc.  Just fill in zeroes and return.  If
                   we try instead to do the full transaction here, then bad
                   things seem to happen within the lirc driver module
                   (version 0.8.0-7 sources from Debian, when run under
                   vanilla 2.6.17.6 kernel) - and I don't have the patience
                   to chase it down. */
                if (rlen > 0) rdata[0] = 0;
                if (rlen > 1) rdata[1] = 0;
                return 0;
        }

        /* Issue a command to the FX2 to read the IR receiver. */
        LOCK_TAKE(hdw->ctl_lock); do {
                hdw->cmd_buffer[0] = FX2CMD_GET_IR_CODE;
                stat = pvr2_send_request(hdw,
                                         hdw->cmd_buffer,1,
                                         hdw->cmd_buffer,4);
                dat[0] = hdw->cmd_buffer[0];
                dat[1] = hdw->cmd_buffer[1];
                dat[2] = hdw->cmd_buffer[2];
                dat[3] = hdw->cmd_buffer[3];
        } while (0); LOCK_GIVE(hdw->ctl_lock);

        /* Give up if that operation failed. */
        if (stat != 0) return stat;

        /* Mangle the results into something that looks like the real IR
           receiver. */
        rdata[2] = 0xc1;
        if (dat[0] != 1) {
                /* No code received. */
                rdata[0] = 0;
                rdata[1] = 0;
        } else {
                u16 val;
                /* Mash the FX2 firmware-provided IR code into something
                   that the normal i2c chip-level driver expects. */
                val = dat[1];
                val <<= 8;
                val |= dat[2];
                val >>= 1;
                val &= ~0x0003;
                val |= 0x8000;
                rdata[0] = (val >> 8) & 0xffu;
                rdata[1] = val & 0xffu;
        }

        return 0;
}
/* Special entry point for I2C traffic to a wm8775 on 24xxx hardware.
   Autodetection of this part doesn't work even though it is really
   there, so a zero-length probe is answered with unconditional
   success; everything else is passed straight through. */
static int i2c_hack_wm8775(struct pvr2_hdw *hdw,
                           u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
{
        if (!wlen && !rlen) {
                /* Probe attempt - pretend the chip answered. */
                return 0;
        }

        return pvr2_i2c_basic_op(hdw, i2c_addr, wdata, wlen, rdata, rlen);
}
/* This is an entry point designed to always fail any attempt to perform a
   transfer.  We use this to cause certain I2C addresses to not be
   probed. */
static int i2c_black_hole(struct pvr2_hdw *hdw,
                          u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
{
        return -EIO;
}
/* This is a special entry point that is entered if an I2C operation is
   attempted to a cx25840 chip on model 24xxx hardware.  This chip can
   sometimes wedge itself.  Worse still, when this happens msp3400 can
   falsely detect this part and then the system gets hosed up after msp3400
   gets confused and dies.  What we want to do here is try to keep msp3400
   away and also try to notice if the chip is wedged and send a warning to
   the system log.

   State machine (hdw->i2c_cx25840_hack_state):
     0 - initial; only the exact revision-register access pattern is let
         through, anything else is rejected.
     1 - subaddress 0x0100 (revision low byte) has been written.
     2 - subaddress 0x0101 (revision high byte) has been written.
     3 - revision read looked sane; all traffic passes through from now on.
*/
static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
                            u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
{
        int ret;
        unsigned int subaddr;
        u8 wbuf[2];
        int state = hdw->i2c_cx25840_hack_state;

        if (!(rlen || wlen)) {
                // Probe attempt - always just succeed and don't bother the
                // hardware (this helps to make the state machine further
                // down somewhat easier).
                return 0;
        }

        if (state == 3) {
                return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen);
        }

        /* We're looking for the exact pattern where the revision register
           is being read.  The cx25840 module will always look at the
           revision register first.  Any other pattern of access therefore
           has to be a probe attempt from somebody else so we'll reject it.
           Normally we could just let each client just probe the part
           anyway, but when the cx25840 is wedged, msp3400 will get a false
           positive and that just screws things up... */

        if (wlen == 0) {
                /* Bare read: only legal right after one of the two
                   expected subaddress writes. */
                switch (state) {
                case 1: subaddr = 0x0100; break;
                case 2: subaddr = 0x0101; break;
                default: goto fail;
                }
        } else if (wlen == 2) {
                subaddr = (wdata[0] << 8) | wdata[1];
                switch (subaddr) {
                case 0x0100: state = 1; break;
                case 0x0101: state = 2; break;
                default: goto fail;
                }
        } else {
                goto fail;
        }
        if (!rlen) goto success;
        state = 0;
        if (rlen != 1) goto fail;

        /* If we get to here then we have a legitimate read for one of the
           two revision bytes, so pass it through. */
        wbuf[0] = subaddr >> 8;
        wbuf[1] = subaddr;
        ret = pvr2_i2c_basic_op(hdw,i2c_addr,wbuf,2,rdata,rlen);

        /* 0x04 / 0x0a revision bytes indicate a wedged chip. */
        if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
                           "WARNING: Detected a wedged cx25840 chip;"
                           " the device will not work.");
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
                           "WARNING: Try power cycling the pvrusb2 device.");
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
                           "WARNING: Disabling further access to the device"
                           " to prevent other foul-ups.");
                // This blocks all further communication with the part.
                hdw->i2c_func[0x44] = NULL;
                pvr2_hdw_render_useless(hdw);
                goto fail;
        }

        /* Success! */
        pvr2_trace(PVR2_TRACE_CHIPS,"cx25840 appears to be OK.");
        state = 3;

success:
        hdw->i2c_cx25840_hack_state = state;
        return 0;

fail:
        hdw->i2c_cx25840_hack_state = state;
        return -EIO;
}
/* This is a very, very limited I2C adapter implementation.  We can only
   support what we actually know will work on the device...

   Supported shapes: a single read (split into command-buffer-sized
   chunks), a single write, or a write immediately followed by a read
   of the same address.  Everything else is refused with -ENOTSUPP.
   Dispatch goes through the per-address handler table hdw->i2c_func.
   On success the number of messages handled is returned, per the
   i2c_algorithm master_xfer contract. */
static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
                         struct i2c_msg msgs[],
                         int num)
{
        int ret = -ENOTSUPP;
        pvr2_i2c_func funcp = NULL;
        struct pvr2_hdw *hdw = (struct pvr2_hdw *)(i2c_adap->algo_data);

        if (!num) {
                ret = -EINVAL;
                goto done;
        }
        if (msgs[0].addr < PVR2_I2C_FUNC_CNT) {
                funcp = hdw->i2c_func[msgs[0].addr];
        }
        if (!funcp) {
                ret = -EIO;
                goto done;
        }

        if (num == 1) {
                if (msgs[0].flags & I2C_M_RD) {
                        /* Simple read */
                        u16 tcnt,bcnt,offs;
                        if (!msgs[0].len) {
                                /* Length == 0 read.  This is a probe. */
                                if (funcp(hdw,msgs[0].addr,NULL,0,NULL,0)) {
                                        ret = -EIO;
                                        goto done;
                                }
                                ret = 1;
                                goto done;
                        }
                        /* If the read is short enough we'll do the whole
                           thing atomically.  Otherwise we have no choice
                           but to break apart the reads. */
                        tcnt = msgs[0].len;
                        offs = 0;
                        while (tcnt) {
                                bcnt = tcnt;
                                if (bcnt > sizeof(hdw->cmd_buffer)-1) {
                                        bcnt = sizeof(hdw->cmd_buffer)-1;
                                }
                                if (funcp(hdw,msgs[0].addr,NULL,0,
                                          msgs[0].buf+offs,bcnt)) {
                                        ret = -EIO;
                                        goto done;
                                }
                                offs += bcnt;
                                tcnt -= bcnt;
                        }
                        ret = 1;
                        goto done;
                } else {
                        /* Simple write */
                        ret = 1;
                        if (funcp(hdw,msgs[0].addr,
                                  msgs[0].buf,msgs[0].len,NULL,0)) {
                                ret = -EIO;
                        }
                        goto done;
                }
        } else if (num == 2) {
                if (msgs[0].addr != msgs[1].addr) {
                        trace_i2c("i2c refusing 2 phase transfer with"
                                  " conflicting target addresses");
                        ret = -ENOTSUPP;
                        goto done;
                }
                if ((!((msgs[0].flags & I2C_M_RD))) &&
                    (msgs[1].flags & I2C_M_RD)) {
                        u16 tcnt,bcnt,wcnt,offs;
                        /* Write followed by atomic read.  If the read
                           portion is short enough we'll do the whole thing
                           atomically.  Otherwise we have no choice but to
                           break apart the reads. */
                        tcnt = msgs[1].len;
                        wcnt = msgs[0].len;
                        offs = 0;
                        /* The write data is only sent with the first
                           chunk; wcnt is zeroed after it. */
                        while (tcnt || wcnt) {
                                bcnt = tcnt;
                                if (bcnt > sizeof(hdw->cmd_buffer)-1) {
                                        bcnt = sizeof(hdw->cmd_buffer)-1;
                                }
                                if (funcp(hdw,msgs[0].addr,
                                          msgs[0].buf,wcnt,
                                          msgs[1].buf+offs,bcnt)) {
                                        ret = -EIO;
                                        goto done;
                                }
                                offs += bcnt;
                                tcnt -= bcnt;
                                wcnt = 0;
                        }
                        ret = 2;
                        goto done;
                } else {
                        trace_i2c("i2c refusing complex transfer"
                                  " read0=%d read1=%d",
                                  (msgs[0].flags & I2C_M_RD),
                                  (msgs[1].flags & I2C_M_RD));
                }
        } else {
                trace_i2c("i2c refusing %d phase transfer",num);
        }

done:
        /* Optional traffic dump of every message (first 8 data bytes). */
        if (pvrusb2_debug & PVR2_TRACE_I2C_TRAF) {
                unsigned int idx,offs,cnt;
                for (idx = 0; idx < num; idx++) {
                        cnt = msgs[idx].len;
                        printk(KERN_INFO
                               "pvrusb2 i2c xfer %u/%u:"
                               " addr=0x%x len=%d %s",
                               idx+1,num,
                               msgs[idx].addr,
                               cnt,
                               (msgs[idx].flags & I2C_M_RD ?
                                "read" : "write"));
                        if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
                                /* NOTE(review): cnt is clamped to 8 here,
                                   so the "offs < cnt" test after the loop
                                   can never be true and the " ..."
                                   truncation marker is never printed;
                                   the original length would need to be
                                   kept separately for that to work. */
                                if (cnt > 8) cnt = 8;
                                printk(" [");
                                for (offs = 0; offs < (cnt>8?8:cnt); offs++) {
                                        if (offs) printk(" ");
                                        printk("%02x",msgs[idx].buf[offs]);
                                }
                                if (offs < cnt) printk(" ...");
                                printk("]");
                        }
                        if (idx+1 == num) {
                                printk(" result=%d",ret);
                        }
                        printk("\n");
                }
                if (!num) {
                        printk(KERN_INFO
                               "pvrusb2 i2c xfer null transfer result=%d\n",
                               ret);
                }
        }
        return ret;
}
/* Report adapter capabilities: raw I2C transfers, plus SMBus commands
   emulated on top of them by the i2c core. */
static u32 pvr2_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
/* I2C algorithm operations shared by every pvrusb2 unit; copied into
   each hdw instance by pvr2_i2c_core_init() below. */
static struct i2c_algorithm pvr2_i2c_algo_template = {
	.master_xfer = pvr2_i2c_xfer,
	.functionality = pvr2_i2c_functionality,
};
/* Adapter skeleton; also copied per-instance by pvr2_i2c_core_init(),
   which fills in name, algo, algo_data and the parent device. */
static struct i2c_adapter pvr2_i2c_adap_template = {
	.owner = THIS_MODULE,
	.class = 0,
};
/* Return true (nonzero) if a device answers at the given I2C address.
   A zero-length read acts as a pure presence probe; i2c_transfer()
   returns the number of messages completed, so success is rc == 1. */
static int do_i2c_probe(struct pvr2_hdw *hdw, int addr)
{
	struct i2c_msg msg[1];
	int rc;

	/* The original code first set msg[0].addr = 0 and then overwrote
	   it with addr below; the dead store has been removed. */
	msg[0].addr = addr;
	msg[0].flags = I2C_M_RD;
	msg[0].len = 0;
	msg[0].buf = NULL;
	rc = i2c_transfer(&hdw->i2c_adap, msg, ARRAY_SIZE(msg));
	return rc == 1;
}
/* Walk all 7-bit I2C addresses and log any that respond to a probe. */
static void do_i2c_scan(struct pvr2_hdw *hdw)
{
	int addr;

	printk(KERN_INFO "%s: i2c scan beginning\n", hdw->name);
	for (addr = 0; addr < 128; addr++) {
		if (!do_i2c_probe(hdw, addr))
			continue;
		printk(KERN_INFO "%s: i2c scan: found device @ 0x%x\n",
		       hdw->name, addr);
	}
	printk(KERN_INFO "%s: i2c scan done.\n", hdw->name);
}
/*
 * Bind the appropriate IR receiver (and, for Zilog hardware, the
 * transmitter as well) i2c client devices for this unit, selected by
 * the active IR scheme.  Does nothing when the scheme is unknown or
 * automatic binding was disabled via the pvr2_disable_ir_video option.
 */
static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
{
	struct i2c_board_info info;
	struct IR_i2c_init_data *init_data = &hdw->ir_init_data;
	if (pvr2_disable_ir_video) {
		pvr2_trace(PVR2_TRACE_INFO,
			   "Automatic binding of ir_video has been disabled.");
		return;
	}
	memset(&info, 0, sizeof(struct i2c_board_info));
	switch (hdw->ir_scheme_active) {
	case PVR2_IR_SCHEME_24XXX: /* FX2-controlled IR */
	case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
		/* Polled ir-kbd-i2c style receiver at address 0x18 */
		init_data->ir_codes = RC_MAP_HAUPPAUGE;
		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
		init_data->type = RC_TYPE_RC5;
		init_data->name = hdw->hdw_desc->description;
		init_data->polling_interval = 100; /* ms From ir-kbd-i2c */
		/* IR Receiver */
		info.addr = 0x18;
		info.platform_data = init_data;
		strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
			   info.type, info.addr);
		i2c_new_device(&hdw->i2c_adap, &info);
		break;
	case PVR2_IR_SCHEME_ZILOG: /* HVR-1950 style */
	case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
		/* Z8F0811 part: separate rx (0x71) and tx (0x70) clients */
		init_data->ir_codes = RC_MAP_HAUPPAUGE;
		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
		init_data->type = RC_TYPE_RC5;
		init_data->name = hdw->hdw_desc->description;
		/* IR Receiver */
		info.addr = 0x71;
		info.platform_data = init_data;
		strlcpy(info.type, "ir_rx_z8f0811_haup", I2C_NAME_SIZE);
		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
			   info.type, info.addr);
		i2c_new_device(&hdw->i2c_adap, &info);
		/* IR Transmitter */
		info.addr = 0x70;
		info.platform_data = init_data;
		strlcpy(info.type, "ir_tx_z8f0811_haup", I2C_NAME_SIZE);
		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
			   info.type, info.addr);
		i2c_new_device(&hdw->i2c_adap, &info);
		break;
	default:
		/* The device either doesn't support I2C-based IR or we
		   don't know (yet) how to operate IR on the device. */
		break;
	}
}
/*
 * Set up the I2C adapter for this unit: install per-address transfer
 * handlers (including 24xxx-specific quirks), register the adapter
 * with the i2c core, detect the MCE-style IR variant, and bind IR
 * client devices.  Must run before anything else touches hdw->i2c_adap.
 */
void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
{
	unsigned int idx;

	/* The default action for all possible I2C addresses is just to do
	   the transfer normally. */
	for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) {
		hdw->i2c_func[idx] = pvr2_i2c_basic_op;
	}

	/* However, deal with various special cases for 24xxx hardware. */
	if (ir_mode[hdw->unit_number] == 0) {
		printk(KERN_INFO "%s: IR disabled\n",hdw->name);
		hdw->i2c_func[0x18] = i2c_black_hole;
	} else if (ir_mode[hdw->unit_number] == 1) {
		if (hdw->ir_scheme_active == PVR2_IR_SCHEME_24XXX) {
			/* Set up translation so that our IR looks like a
			   29xxx device */
			hdw->i2c_func[0x18] = i2c_24xxx_ir;
		}
	}
	if (hdw->hdw_desc->flag_has_cx25840) {
		hdw->i2c_func[0x44] = i2c_hack_cx25840;
	}
	if (hdw->hdw_desc->flag_has_wm8775) {
		hdw->i2c_func[0x1b] = i2c_hack_wm8775;
	}

	// Configure the adapter and set up everything else related to it.
	memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap));
	memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo));
	strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name));
	hdw->i2c_adap.dev.parent = &hdw->usb_dev->dev;
	hdw->i2c_adap.algo = &hdw->i2c_algo;
	hdw->i2c_adap.algo_data = hdw;
	hdw->i2c_linked = !0;
	i2c_set_adapdata(&hdw->i2c_adap, &hdw->v4l2_dev);
	i2c_add_adapter(&hdw->i2c_adap);
	if (hdw->i2c_func[0x18] == i2c_24xxx_ir) {
		/* Probe for a different type of IR receiver on this
		   device.  This is really the only way to differentiate
		   older 24xxx devices from 24xxx variants that include an
		   IR blaster.  If the IR blaster is present, the IR
		   receiver is part of that chip and thus we must disable
		   the emulated IR receiver. */
		if (do_i2c_probe(hdw, 0x71)) {
			pvr2_trace(PVR2_TRACE_INFO,
				   "Device has newer IR hardware;"
				   " disabling unneeded virtual IR device");
			hdw->i2c_func[0x18] = NULL;
			/* Remember that this is a different device... */
			hdw->ir_scheme_active = PVR2_IR_SCHEME_24XXX_MCE;
		}
	}
	if (i2c_scan) do_i2c_scan(hdw);

	pvr2_i2c_register_ir(hdw);
}
/* Tear down the I2C adapter registered by pvr2_i2c_core_init().
   Safe to call when the adapter was never linked. */
void pvr2_i2c_core_done(struct pvr2_hdw *hdw)
{
	if (!hdw->i2c_linked)
		return;
	i2c_del_adapter(&hdw->i2c_adap);
	hdw->i2c_linked = 0;
}
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 75 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
UnknownzD/I9103_ICS_Kernel | arch/unicore32/kernel/module.c | 5239 | 2854 | /*
* linux/arch/unicore32/kernel/module.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
/* Allocate page-aligned, executable memory in the dedicated module
   VA range for loading a kernel module.  Returns NULL on failure or
   for a zero-sized request. */
void *module_alloc(unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (size == 0)
		return NULL;

	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
	return area ? __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC)
		    : NULL;
}
/*
 * Apply a table of Elf32_Rel relocations (section @relindex) to the
 * section it targets (relsec->sh_info), resolving symbol values from
 * the symbol table in section @symindex.
 *
 * Handles R_UNICORE_NONE, R_UNICORE_ABS32, and the 24-bit PC-relative
 * branch forms (PC24/CALL/JUMP24).
 *
 * Returns 0 on success or -ENOEXEC if an entry is malformed, targets
 * memory outside its section, or a branch is out of range.
 */
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
	       unsigned int relindex, struct module *module)
{
	Elf32_Shdr *symsec = sechdrs + symindex;
	Elf32_Shdr *relsec = sechdrs + relindex;
	Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
	Elf32_Rel *rel = (void *)relsec->sh_addr;
	unsigned int i;

	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
		unsigned long loc;
		Elf32_Sym *sym;
		s32 offset;

		offset = ELF32_R_SYM(rel->r_info);
		/* Valid symbol indices are 0 .. nsyms-1.  The previous
		 * '>' test let offset == nsyms through, reading one
		 * entry past the end of the symbol table. */
		if (offset < 0 || offset >=
		    (symsec->sh_size / sizeof(Elf32_Sym))) {
			printk(KERN_ERR "%s: bad relocation, "
			       "section %d reloc %d\n",
			       module->name, relindex, i);
			return -ENOEXEC;
		}

		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;

		/* The patched 32-bit word must lie entirely inside the
		 * destination section. */
		if (rel->r_offset < 0 || rel->r_offset >
		    dstsec->sh_size - sizeof(u32)) {
			printk(KERN_ERR "%s: out of bounds relocation, "
			       "section %d reloc %d offset %d size %d\n",
			       module->name, relindex, i, rel->r_offset,
			       dstsec->sh_size);
			return -ENOEXEC;
		}

		loc = dstsec->sh_addr + rel->r_offset;

		switch (ELF32_R_TYPE(rel->r_info)) {
		case R_UNICORE_NONE:
			/* ignore */
			break;

		case R_UNICORE_ABS32:
			*(u32 *)loc += sym->st_value;
			break;

		case R_UNICORE_PC24:
		case R_UNICORE_CALL:
		case R_UNICORE_JUMP24:
			/* Decode the existing signed 24-bit word offset
			 * (sign-extend from bit 25 after the <<2), add
			 * the displacement to the symbol, range-check
			 * the +/-32MB branch reach, then re-encode. */
			offset = (*(u32 *)loc & 0x00ffffff) << 2;
			if (offset & 0x02000000)
				offset -= 0x04000000;

			offset += sym->st_value - loc;
			if (offset & 3 ||
			    offset <= (s32)0xfe000000 ||
			    offset >= (s32)0x02000000) {
				printk(KERN_ERR
				       "%s: relocation out of range, section "
				       "%d reloc %d sym '%s'\n", module->name,
				       relindex, i, strtab + sym->st_name);
				return -ENOEXEC;
			}

			offset >>= 2;

			*(u32 *)loc &= 0xff000000;
			*(u32 *)loc |= offset & 0x00ffffff;
			break;

		default:
			printk(KERN_ERR "%s: unknown relocation: %u\n",
			       module->name, ELF32_R_TYPE(rel->r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
| gpl-2.0 |
ASAZING/Android-Kernel-Gt-s7390l | drivers/net/wireless/hostap/hostap_80211_rx.c | 5239 | 32677 | #include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/lib80211.h>
#include <linux/if_arp.h>
#include "hostap_80211.h"
#include "hostap.h"
#include "hostap_ap.h"
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
/* LLC/SNAP prefix used when re-encapsulating 802.11 payloads as
   Ethernet (IEEE 802.1H / RFC 1042). */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
/*
 * Debug helper: dump an RX frame's 802.11 header fields (FC, duration,
 * sequence control, addresses) together with radio statistics to the
 * kernel log.  Tolerates truncated frames by printing only what fits.
 */
void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
			  struct hostap_80211_rx_status *rx_stats)
{
	struct ieee80211_hdr *hdr;
	u16 fc;

	hdr = (struct ieee80211_hdr *) skb->data;

	printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d "
	       "jiffies=%ld\n",
	       name, rx_stats->signal, rx_stats->noise, rx_stats->rate,
	       skb->len, jiffies);

	/* Need at least the frame control field to decode anything. */
	if (skb->len < 2)
		return;

	fc = le16_to_cpu(hdr->frame_control);
	printk(KERN_DEBUG "   FC=0x%04x (type=%d:%d)%s%s",
	       fc, (fc & IEEE80211_FCTL_FTYPE) >> 2,
	       (fc & IEEE80211_FCTL_STYPE) >> 4,
	       fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
	       fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");

	/* Without a full 3-address data header there is nothing more. */
	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
		printk("\n");
		return;
	}

	printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
	       le16_to_cpu(hdr->seq_ctrl));

	printk(KERN_DEBUG "   A1=%pM", hdr->addr1);
	printk(" A2=%pM", hdr->addr2);
	printk(" A3=%pM", hdr->addr3);
	/* addr4 exists only for 4-address (WDS) frames. */
	if (skb->len >= 30)
		printk(" A4=%pM", hdr->addr4);
	printk("\n");
}
/* Send RX frame to netif with 802.11 (and possible prism) header.
 * Called from hardware or software IRQ context.
 *
 * Depending on the netdev ARP type, prepends a wlan-ng prism header,
 * a wlan-ng capture header, or a radiotap header before handing the
 * frame to the network stack.  Returns the payload length delivered
 * (frame length minus the prepended pseudo-header). */
int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
		    struct hostap_80211_rx_status *rx_stats, int type)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int hdrlen, phdrlen, head_need, tail_need;
	u16 fc;
	int prism_header, ret;
	struct ieee80211_hdr *fhdr;

	iface = netdev_priv(dev);
	local = iface->local;

	/* Select which pseudo-header format (if any) the destination
	   device expects, and its size. */
	if (dev->type == ARPHRD_IEEE80211_PRISM) {
		if (local->monitor_type == PRISM2_MONITOR_PRISM) {
			prism_header = 1;
			phdrlen = sizeof(struct linux_wlan_ng_prism_hdr);
		} else { /* local->monitor_type == PRISM2_MONITOR_CAPHDR */
			prism_header = 2;
			phdrlen = sizeof(struct linux_wlan_ng_cap_hdr);
		}
	} else if (dev->type == ARPHRD_IEEE80211_RADIOTAP) {
		prism_header = 3;
		phdrlen = sizeof(struct hostap_radiotap_rx);
	} else {
		prism_header = 0;
		phdrlen = 0;
	}

	fhdr = (struct ieee80211_hdr *) skb->data;
	fc = le16_to_cpu(fhdr->frame_control);

	if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
		printk(KERN_DEBUG "%s: dropped management frame with header "
		       "version %d\n", dev->name, fc & IEEE80211_FCTL_VERS);
		dev_kfree_skb_any(skb);
		return 0;
	}

	hdrlen = hostap_80211_get_hdrlen(fhdr->frame_control);

	/* check if there is enough room for extra data; if not, expand skb
	 * buffer to be large enough for the changes */
	head_need = phdrlen;
	tail_need = 0;
#ifdef PRISM2_ADD_BOGUS_CRC
	tail_need += 4;
#endif /* PRISM2_ADD_BOGUS_CRC */

	head_need -= skb_headroom(skb);
	tail_need -= skb_tailroom(skb);

	if (head_need > 0 || tail_need > 0) {
		if (pskb_expand_head(skb, head_need > 0 ? head_need : 0,
				     tail_need > 0 ? tail_need : 0,
				     GFP_ATOMIC)) {
			printk(KERN_DEBUG "%s: prism2_rx_80211 failed to "
			       "reallocate skb buffer\n", dev->name);
			dev_kfree_skb_any(skb);
			return 0;
		}
	}

	/* We now have an skb with enough head and tail room, so just insert
	 * the extra data */

#ifdef PRISM2_ADD_BOGUS_CRC
	memset(skb_put(skb, 4), 0xff, 4); /* Prism2 strips CRC */
#endif /* PRISM2_ADD_BOGUS_CRC */

	if (prism_header == 1) {
		/* wlan-ng prism monitoring header */
		struct linux_wlan_ng_prism_hdr *hdr;
		hdr = (struct linux_wlan_ng_prism_hdr *)
			skb_push(skb, phdrlen);
		memset(hdr, 0, phdrlen);
		hdr->msgcode = LWNG_CAP_DID_BASE;
		hdr->msglen = sizeof(*hdr);
		memcpy(hdr->devname, dev->name, sizeof(hdr->devname));
#define LWNG_SETVAL(f,i,s,l,d) \
hdr->f.did = LWNG_CAP_DID_BASE | (i << 12); \
hdr->f.status = s; hdr->f.len = l; hdr->f.data = d
		LWNG_SETVAL(hosttime, 1, 0, 4, jiffies);
		LWNG_SETVAL(mactime, 2, 0, 4, rx_stats->mac_time);
		LWNG_SETVAL(channel, 3, 1 /* no value */, 4, 0);
		LWNG_SETVAL(rssi, 4, 1 /* no value */, 4, 0);
		LWNG_SETVAL(sq, 5, 1 /* no value */, 4, 0);
		LWNG_SETVAL(signal, 6, 0, 4, rx_stats->signal);
		LWNG_SETVAL(noise, 7, 0, 4, rx_stats->noise);
		LWNG_SETVAL(rate, 8, 0, 4, rx_stats->rate / 5);
		LWNG_SETVAL(istx, 9, 0, 4, 0);
		LWNG_SETVAL(frmlen, 10, 0, 4, skb->len - phdrlen);
#undef LWNG_SETVAL
	} else if (prism_header == 2) {
		/* wlan-ng capture header (big-endian fields) */
		struct linux_wlan_ng_cap_hdr *hdr;
		hdr = (struct linux_wlan_ng_cap_hdr *)
			skb_push(skb, phdrlen);
		memset(hdr, 0, phdrlen);
		hdr->version    = htonl(LWNG_CAPHDR_VERSION);
		hdr->length     = htonl(phdrlen);
		hdr->mactime    = __cpu_to_be64(rx_stats->mac_time);
		hdr->hosttime   = __cpu_to_be64(jiffies);
		hdr->phytype    = htonl(4); /* dss_dot11_b */
		hdr->channel    = htonl(local->channel);
		hdr->datarate   = htonl(rx_stats->rate);
		hdr->antenna    = htonl(0); /* unknown */
		hdr->priority   = htonl(0); /* unknown */
		hdr->ssi_type   = htonl(3); /* raw */
		hdr->ssi_signal = htonl(rx_stats->signal);
		hdr->ssi_noise  = htonl(rx_stats->noise);
		hdr->preamble   = htonl(0); /* unknown */
		hdr->encoding   = htonl(1); /* cck */
	} else if (prism_header == 3) {
		/* radiotap header (little-endian fields) */
		struct hostap_radiotap_rx *hdr;
		hdr = (struct hostap_radiotap_rx *)skb_push(skb, phdrlen);
		memset(hdr, 0, phdrlen);
		hdr->hdr.it_len = cpu_to_le16(phdrlen);
		hdr->hdr.it_present =
			cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
				    (1 << IEEE80211_RADIOTAP_CHANNEL) |
				    (1 << IEEE80211_RADIOTAP_RATE) |
				    (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
				    (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE));
		hdr->tsft = cpu_to_le64(rx_stats->mac_time);
		hdr->chan_freq = cpu_to_le16(freq_list[local->channel - 1]);
		hdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_CCK |
					      IEEE80211_CHAN_2GHZ);
		hdr->rate = rx_stats->rate / 5;
		hdr->dbm_antsignal = rx_stats->signal;
		hdr->dbm_antnoise = rx_stats->noise;
	}

	ret = skb->len - phdrlen;
	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb_pull(skb, hdrlen);
	if (prism_header)
		skb_pull(skb, phdrlen);
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = cpu_to_be16(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));
	netif_rx(skb);

	return ret;
}
/* Called only as a tasklet (software IRQ).  Deliver a frame to the
   monitor-mode interface and account for it in the device stats. */
static void monitor_rx(struct net_device *dev, struct sk_buff *skb,
		       struct hostap_80211_rx_status *rx_stats)
{
	int delivered = prism2_rx_80211(dev, skb, rx_stats,
					PRISM2_RX_MONITOR);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += delivered;
}
/* Called only as a tasklet (software IRQ).
 * Look up the fragment-cache entry matching seq/src/dst.  frag must be
 * the next expected fragment number, or -1 to match any.  Entries older
 * than 2 seconds are expired as a side effect of the scan. */
static struct prism2_frag_entry *
prism2_frag_cache_find(local_info_t *local, unsigned int seq,
		       unsigned int frag, u8 *src, u8 *dst)
{
	int i;

	for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
		struct prism2_frag_entry *entry = &local->frag_cache[i];

		if (entry->skb != NULL &&
		    time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			printk(KERN_DEBUG "%s: expiring fragment cache entry "
			       "seq=%u last_frag=%u\n",
			       local->dev->name, entry->seq, entry->last_frag);
			dev_kfree_skb(entry->skb);
			entry->skb = NULL;
		}

		if (entry->skb == NULL || entry->seq != seq)
			continue;
		if (entry->last_frag + 1 != frag && frag != (unsigned int) -1)
			continue;
		if (memcmp(entry->src_addr, src, ETH_ALEN) != 0 ||
		    memcmp(entry->dst_addr, dst, ETH_ALEN) != 0)
			continue;
		return entry;
	}

	return NULL;
}
/* Called only as a tasklet (software IRQ).
 * For the first fragment (frag 0), allocate a reassembly skb and claim
 * a cache slot; for later fragments, return the in-progress skb whose
 * seq/addresses match.  Returns NULL on allocation failure or when no
 * matching head fragment exists. */
static struct sk_buff *
prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
{
	struct sk_buff *skb = NULL;
	u16 sc;
	unsigned int frag, seq;
	struct prism2_frag_entry *entry;

	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* Reserve enough space to fit maximum frame length */
		skb = dev_alloc_skb(local->dev->mtu +
				    sizeof(struct ieee80211_hdr) +
				    8 /* LLC */ +
				    2 /* alignment */ +
				    8 /* WEP */ + ETH_ALEN /* WDS */);
		if (skb == NULL)
			return NULL;

		/* Slots are assigned round-robin; an in-use slot is
		 * simply evicted. */
		entry = &local->frag_cache[local->frag_next_idx];
		local->frag_next_idx++;
		if (local->frag_next_idx >= PRISM2_FRAG_CACHE_LEN)
			local->frag_next_idx = 0;

		if (entry->skb != NULL)
			dev_kfree_skb(entry->skb);

		entry->first_frag_time = jiffies;
		entry->seq = seq;
		entry->last_frag = frag;
		entry->skb = skb;
		memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
		memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
	} else {
		/* received a fragment of a frame for which the head fragment
		 * should have already been received */
		entry = prism2_frag_cache_find(local, seq, frag, hdr->addr2,
					       hdr->addr1);
		if (entry != NULL) {
			entry->last_frag = frag;
			skb = entry->skb;
		}
	}

	return skb;
}
/* Called only as a tasklet (software IRQ).
 * Release the fragment-cache slot for the given frame's sequence and
 * addresses (caller has taken ownership of the skb).  Returns 0 on
 * success, -1 if no matching entry was found. */
static int prism2_frag_cache_invalidate(local_info_t *local,
					struct ieee80211_hdr *hdr)
{
	unsigned int seq = (le16_to_cpu(hdr->seq_ctrl) &
			    IEEE80211_SCTL_SEQ) >> 4;
	struct prism2_frag_entry *entry;

	entry = prism2_frag_cache_find(local, seq, -1, hdr->addr2, hdr->addr1);
	if (entry == NULL) {
		printk(KERN_DEBUG "%s: could not invalidate fragment cache "
		       "entry (seq=%u)\n",
		       local->dev->name, seq);
		return -1;
	}

	entry->skb = NULL;
	return 0;
}
/* Find a cached BSS entry by BSSID (and SSID, unless ssid is NULL).
 * A hit is moved to the head of the list so the tail stays the least
 * recently seen entry.  Caller must hold local->lock. */
static struct hostap_bss_info *__hostap_get_bss(local_info_t *local, u8 *bssid,
						u8 *ssid, size_t ssid_len)
{
	struct hostap_bss_info *bss;
	struct list_head *pos;

	list_for_each(pos, &local->bss_list) {
		bss = list_entry(pos, struct hostap_bss_info, list);
		if (memcmp(bss->bssid, bssid, ETH_ALEN) != 0)
			continue;
		if (ssid != NULL &&
		    (ssid_len != bss->ssid_len ||
		     memcmp(ssid, bss->ssid, ssid_len) != 0))
			continue;
		list_move(&bss->list, &local->bss_list);
		return bss;
	}

	return NULL;
}
/* Add a new BSS entry at the head of the cache, recycling the least
 * recently updated entry when the table is full.  Returns NULL only on
 * allocation failure.  Caller must hold local->lock. */
static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid,
						u8 *ssid, size_t ssid_len)
{
	struct hostap_bss_info *bss;

	if (local->num_bss_info >= HOSTAP_MAX_BSS_COUNT) {
		bss = list_entry(local->bss_list.prev,
				 struct hostap_bss_info, list);
		list_del(&bss->list);
		local->num_bss_info--;
	} else {
		bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
		if (bss == NULL)
			return NULL;
	}

	memset(bss, 0, sizeof(*bss));
	memcpy(bss->bssid, bssid, ETH_ALEN);
	/* ssid may be NULL when the beacon carried no SSID element
	 * (ssid_len is 0 then); calling memcpy with a NULL source is
	 * undefined behavior even for a zero length, so guard it. */
	if (ssid != NULL && ssid_len > 0)
		memcpy(bss->ssid, ssid, ssid_len);
	bss->ssid_len = ssid_len;
	local->num_bss_info++;
	list_add(&bss->list, &local->bss_list);
	return bss;
}
/* Drop BSS cache entries not updated within the last 60 seconds.
 * The list is kept in most-recently-updated-first order, so scanning
 * stops at the first entry that is still fresh.  Caller must hold
 * local->lock. */
static void __hostap_expire_bss(local_info_t *local)
{
	struct hostap_bss_info *bss;

	while (local->num_bss_info > 0) {
		bss = list_entry(local->bss_list.prev,
				 struct hostap_bss_info, list);
		if (time_before_eq(jiffies, bss->last_update + 60 * HZ))
			break;

		list_del(&bss->list);
		local->num_bss_info--;
		kfree(bss);
	}
}
/* Both IEEE 802.11 Beacon and Probe Response frames have similar structure, so
 * the same routine can be used to parse both of them.
 *
 * Walks the variable information elements, extracts SSID, WPA IE,
 * RSN IE and DS channel, and records them in the BSS cache (creating
 * or refreshing the entry for mgmt->bssid). */
static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
				 int stype)
{
	struct hostap_ieee80211_mgmt *mgmt;
	int left, chan = 0;
	u8 *pos;
	u8 *ssid = NULL, *wpa = NULL, *rsn = NULL;
	size_t ssid_len = 0, wpa_len = 0, rsn_len = 0;
	struct hostap_bss_info *bss;

	if (skb->len < IEEE80211_MGMT_HDR_LEN + sizeof(mgmt->u.beacon))
		return;

	mgmt = (struct hostap_ieee80211_mgmt *) skb->data;
	pos = mgmt->u.beacon.variable;
	left = skb->len - (pos - skb->data);

	/* Each IE is [id, len, data...]; bail out on a truncated IE. */
	while (left >= 2) {
		if (2 + pos[1] > left)
			return; /* parse failed */
		switch (*pos) {
		case WLAN_EID_SSID:
			ssid = pos + 2;
			ssid_len = pos[1];
			break;
		case WLAN_EID_GENERIC:
			/* Vendor-specific IE; 00:50:f2 type 1 is WPA. */
			if (pos[1] >= 4 &&
			    pos[2] == 0x00 && pos[3] == 0x50 &&
			    pos[4] == 0xf2 && pos[5] == 1) {
				wpa = pos;
				wpa_len = pos[1] + 2;
			}
			break;
		case WLAN_EID_RSN:
			rsn = pos;
			rsn_len = pos[1] + 2;
			break;
		case WLAN_EID_DS_PARAMS:
			if (pos[1] >= 1)
				chan = pos[2];
			break;
		}
		left -= 2 + pos[1];
		pos += 2 + pos[1];
	}

	/* Clamp copied lengths to the cache entry's fixed buffers. */
	if (wpa_len > MAX_WPA_IE_LEN)
		wpa_len = MAX_WPA_IE_LEN;
	if (rsn_len > MAX_WPA_IE_LEN)
		rsn_len = MAX_WPA_IE_LEN;
	if (ssid_len > sizeof(bss->ssid))
		ssid_len = sizeof(bss->ssid);

	spin_lock(&local->lock);
	bss = __hostap_get_bss(local, mgmt->bssid, ssid, ssid_len);
	if (bss == NULL)
		bss = __hostap_add_bss(local, mgmt->bssid, ssid, ssid_len);
	if (bss) {
		bss->last_update = jiffies;
		bss->count++;
		bss->capab_info = le16_to_cpu(mgmt->u.beacon.capab_info);
		if (wpa) {
			memcpy(bss->wpa_ie, wpa, wpa_len);
			bss->wpa_ie_len = wpa_len;
		} else
			bss->wpa_ie_len = 0;
		if (rsn) {
			memcpy(bss->rsn_ie, rsn, rsn_len);
			bss->rsn_ie_len = rsn_len;
		} else
			bss->rsn_ie_len = 0;
		bss->chan = chan;
	}
	__hostap_expire_bss(local);
	spin_unlock(&local->lock);
}
/*
 * Dispatch a received management/control frame.  Depending on the
 * operating mode, frames are forwarded to the hostapd user-space
 * daemon (via the apdev interface), handled by the in-kernel AP code,
 * or parsed for BSS scan data.  Returns 0 when the frame was consumed
 * (caller must not free it), -1 when the caller should drop it.
 */
static int
hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
		     struct hostap_80211_rx_status *rx_stats, u16 type,
		     u16 stype)
{
	if (local->iw_mode == IW_MODE_MASTER)
		hostap_update_sta_ps(local, (struct ieee80211_hdr *) skb->data);

	if (local->hostapd && type == IEEE80211_FTYPE_MGMT) {
		if (stype == IEEE80211_STYPE_BEACON &&
		    local->iw_mode == IW_MODE_MASTER) {
			struct sk_buff *skb2;
			/* Process beacon frames also in kernel driver to
			 * update STA(AP) table statistics */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				hostap_rx(skb2->dev, skb2, rx_stats);
		}

		/* send management frames to the user space daemon for
		 * processing */
		local->apdevstats.rx_packets++;
		local->apdevstats.rx_bytes += skb->len;
		if (local->apdev == NULL)
			return -1;
		prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_MGMT);
		return 0;
	}

	if (local->iw_mode == IW_MODE_MASTER) {
		if (type != IEEE80211_FTYPE_MGMT &&
		    type != IEEE80211_FTYPE_CTL) {
			printk(KERN_DEBUG "%s: unknown management frame "
			       "(type=0x%02x, stype=0x%02x) dropped\n",
			       skb->dev->name, type >> 2, stype >> 4);
			return -1;
		}

		hostap_rx(skb->dev, skb, rx_stats);
		return 0;
	} else if (type == IEEE80211_FTYPE_MGMT &&
		   (stype == IEEE80211_STYPE_BEACON ||
		    stype == IEEE80211_STYPE_PROBE_RESP)) {
		/* Feed the scan-result cache; frame itself is dropped. */
		hostap_rx_sta_beacon(local, skb, stype);
		return -1;
	} else if (type == IEEE80211_FTYPE_MGMT &&
		   (stype == IEEE80211_STYPE_ASSOC_RESP ||
		    stype == IEEE80211_STYPE_REASSOC_RESP)) {
		/* Ignore (Re)AssocResp silently since these are not currently
		 * needed but are still received when WPA/RSN mode is enabled.
		 */
		return -1;
	} else {
		printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: dropped unhandled"
		       " management frame in non-Host AP mode (type=%d:%d)\n",
		       skb->dev->name, type >> 2, stype >> 4);
		return -1;
	}
}
/* Called only as a tasklet (software IRQ).
 * Return the net_device of the registered WDS interface whose remote
 * address matches addr, or NULL if none exists. */
static struct net_device *prism2_rx_get_wds(local_info_t *local,
					    u8 *addr)
{
	struct hostap_interface *iface;
	struct net_device *wds_dev = NULL;
	struct list_head *pos;

	read_lock_bh(&local->iface_lock);
	list_for_each(pos, &local->hostap_interfaces) {
		iface = list_entry(pos, struct hostap_interface, list);
		if (iface->type != HOSTAP_INTERFACE_WDS)
			continue;
		if (memcmp(iface->u.wds.remote_addr, addr, ETH_ALEN) != 0)
			continue;
		wds_dev = iface->dev;
		break;
	}
	read_unlock_bh(&local->iface_lock);

	return wds_dev;
}
/*
 * Classify a possible WDS (4-address / inter-AP) frame.  On success
 * *wds is set to the matching WDS net_device (or left NULL for a
 * normal frame).  Returns 0 to continue processing, -1 to drop.
 */
static int
hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, u16 fc,
		    struct net_device **wds)
{
	/* FIX: is this really supposed to accept WDS frames only in Master
	 * mode? What about Repeater or Managed with WDS frames? */
	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) !=
	    (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS) &&
	    (local->iw_mode != IW_MODE_MASTER || !(fc & IEEE80211_FCTL_TODS)))
		return 0; /* not a WDS frame */

	/* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
	 * or own non-standard frame with 4th address after payload */
	if (memcmp(hdr->addr1, local->dev->dev_addr, ETH_ALEN) != 0 &&
	    !is_broadcast_ether_addr(hdr->addr1)) {
		/* RA (or BSSID) is not ours - drop */
		PDEBUG(DEBUG_EXTRA2, "%s: received WDS frame with "
		       "not own or broadcast %s=%pM\n",
		       local->dev->name,
		       fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID",
		       hdr->addr1);
		return -1;
	}

	/* check if the frame came from a registered WDS connection */
	*wds = prism2_rx_get_wds(local, hdr->addr2);
	if (*wds == NULL && fc & IEEE80211_FCTL_FROMDS &&
	    (local->iw_mode != IW_MODE_INFRA ||
	     !(local->wds_type & HOSTAP_WDS_AP_CLIENT) ||
	     memcmp(hdr->addr2, local->bssid, ETH_ALEN) != 0)) {
		/* require that WDS link has been registered with TA or the
		 * frame is from current AP when using 'AP client mode' */
		PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame "
		       "from unknown TA=%pM\n",
		       local->dev->name, hdr->addr2);
		if (local->ap && local->ap->autom_ap_wds)
			hostap_wds_link_oper(local, hdr->addr2, WDS_ADD);
		return -1;
	}

	if (*wds && !(fc & IEEE80211_FCTL_FROMDS) && local->ap &&
	    hostap_is_sta_assoc(local->ap, hdr->addr2)) {
		/* STA is actually associated with us even though it has a
		 * registered WDS link. Assume it is in 'AP client' mode.
		 * Since this is a 3-addr frame, assume it is not (bogus) WDS
		 * frame and process it like any normal ToDS frame from
		 * associated STA. */
		*wds = NULL;
	}

	return 0;
}
/* Return 1 if skb is a unicast EAPOL (IEEE 802.1X) data frame addressed
 * to this interface, 0 otherwise.  Used to let authentication traffic
 * through before encryption keys are in place. */
static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
{
	struct net_device *dev = local->dev;
	struct ieee80211_hdr *hdr;
	u16 fc, ethertype;
	u8 *pos;

	/* Need the full 3-address header plus LLC/SNAP + EtherType. */
	if (skb->len < 24 + 8)
		return 0;

	hdr = (struct ieee80211_hdr *) skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* check that the frame is unicast frame to us */
	switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
	case IEEE80211_FCTL_TODS:
		/* ToDS frame with own addr BSSID and DA */
		if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) != 0 ||
		    memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) != 0)
			return 0;
		break;
	case IEEE80211_FCTL_FROMDS:
		/* FromDS frame with own addr as DA */
		if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) != 0)
			return 0;
		break;
	default:
		return 0;
	}

	/* check for port access entity Ethernet type */
	pos = skb->data + 24;
	ethertype = (pos[6] << 8) | pos[7];

	return ethertype == ETH_P_PAE;
}
/* Called only as a tasklet (software IRQ).
 * Decrypt an MPDU in place using the given crypto context.  Returns
 * the (non-negative) key index from the cipher on success, -1 on
 * decryption failure or when TKIP countermeasures are active, and 0
 * when no decryption is configured. */
static int
hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
			struct lib80211_crypt_data *crypt)
{
	struct ieee80211_hdr *hdr;
	int res, hdrlen;

	if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
		return 0;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);

	/* During TKIP countermeasures all TKIP traffic is dropped. */
	if (local->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "received packet from %pM\n",
			       local->dev->name, hdr->addr2);
		}
		return -1;
	}

	/* refcnt guards the crypto context across the cipher call. */
	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_DEBUG "%s: decryption failed (SA=%pM) res=%d\n",
		       local->dev->name, hdr->addr2, res);
		local->comm_tallies.rx_discards_wep_undecryptable++;
		return -1;
	}

	return res;
}
/* Called only as a tasklet (software IRQ).
 * Run MSDU-level decryption / MIC verification (e.g. Michael MIC for
 * TKIP) on a fully reassembled frame.  Returns 0 on success or when
 * the cipher has no MSDU stage, -1 on verification failure. */
static int
hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
			     int keyidx, struct lib80211_crypt_data *crypt)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int hdrlen, res;

	if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
		return 0;

	hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);

	/* refcnt guards the crypto context across the cipher call. */
	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res >= 0)
		return 0;

	printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
	       " (SA=%pM keyidx=%d)\n",
	       local->dev->name, hdr->addr2, keyidx);
	return -1;
}
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats)
{
struct hostap_interface *iface;
local_info_t *local;
struct ieee80211_hdr *hdr;
size_t hdrlen;
u16 fc, type, stype, sc;
struct net_device *wds = NULL;
unsigned int frag;
u8 *payload;
struct sk_buff *skb2 = NULL;
u16 ethertype;
int frame_authorized = 0;
int from_assoc_ap = 0;
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
struct lib80211_crypt_data *crypt = NULL;
void *sta = NULL;
int keyidx = 0;
iface = netdev_priv(dev);
local = iface->local;
iface->stats.rx_packets++;
iface->stats.rx_bytes += skb->len;
/* dev is the master radio device; change this to be the default
* virtual interface (this may be changed to WDS device below) */
dev = local->ddev;
iface = netdev_priv(dev);
hdr = (struct ieee80211_hdr *) skb->data;
if (skb->len < 10)
goto rx_dropped;
fc = le16_to_cpu(hdr->frame_control);
type = fc & IEEE80211_FCTL_FTYPE;
stype = fc & IEEE80211_FCTL_STYPE;
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);
/* Put this code here so that we avoid duplicating it in all
* Rx paths. - Jean II */
#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
/* If spy monitoring on */
if (iface->spy_data.spy_number > 0) {
struct iw_quality wstats;
wstats.level = rx_stats->signal;
wstats.noise = rx_stats->noise;
wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED
| IW_QUAL_QUAL_INVALID | IW_QUAL_DBM;
/* Update spy records */
wireless_spy_update(dev, hdr->addr2, &wstats);
}
#endif /* IW_WIRELESS_SPY */
hostap_update_rx_stats(local->ap, hdr, rx_stats);
if (local->iw_mode == IW_MODE_MONITOR) {
monitor_rx(dev, skb, rx_stats);
return;
}
if (local->host_decrypt) {
int idx = 0;
if (skb->len >= hdrlen + 3)
idx = skb->data[hdrlen + 3] >> 6;
crypt = local->crypt_info.crypt[idx];
sta = NULL;
/* Use station specific key to override default keys if the
* receiver address is a unicast address ("individual RA"). If
* bcrx_sta_key parameter is set, station specific key is used
* even with broad/multicast targets (this is against IEEE
* 802.11, but makes it easier to use different keys with
* stations that do not support WEP key mapping). */
if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
(void) hostap_handle_sta_crypto(local, hdr, &crypt,
&sta);
/* allow NULL decrypt to indicate an station specific override
* for default encryption */
if (crypt && (crypt->ops == NULL ||
crypt->ops->decrypt_mpdu == NULL))
crypt = NULL;
if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) {
#if 0
/* This seems to be triggered by some (multicast?)
* frames from other than current BSS, so just drop the
* frames silently instead of filling system log with
* these reports. */
printk(KERN_DEBUG "%s: WEP decryption failed (not set)"
" (SA=%pM)\n",
local->dev->name, hdr->addr2);
#endif
local->comm_tallies.rx_discards_wep_undecryptable++;
goto rx_dropped;
}
}
if (type != IEEE80211_FTYPE_DATA) {
if (type == IEEE80211_FTYPE_MGMT &&
stype == IEEE80211_STYPE_AUTH &&
fc & IEEE80211_FCTL_PROTECTED && local->host_decrypt &&
(keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
{
printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
"from %pM\n", dev->name, hdr->addr2);
/* TODO: could inform hostapd about this so that it
* could send auth failure report */
goto rx_dropped;
}
if (hostap_rx_frame_mgmt(local, skb, rx_stats, type, stype))
goto rx_dropped;
else
goto rx_exit;
}
/* Data frame - extract src/dst addresses */
if (skb->len < IEEE80211_DATA_HDR3_LEN)
goto rx_dropped;
switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
case IEEE80211_FCTL_FROMDS:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr3, ETH_ALEN);
break;
case IEEE80211_FCTL_TODS:
memcpy(dst, hdr->addr3, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
break;
case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
if (skb->len < IEEE80211_DATA_HDR4_LEN)
goto rx_dropped;
memcpy(dst, hdr->addr3, ETH_ALEN);
memcpy(src, hdr->addr4, ETH_ALEN);
break;
case 0:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
break;
}
if (hostap_rx_frame_wds(local, hdr, fc, &wds))
goto rx_dropped;
if (wds)
skb->dev = dev = wds;
if (local->iw_mode == IW_MODE_MASTER && !wds &&
(fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_FROMDS &&
local->stadev &&
memcmp(hdr->addr2, local->assoc_ap_addr, ETH_ALEN) == 0) {
/* Frame from BSSID of the AP for which we are a client */
skb->dev = dev = local->stadev;
from_assoc_ap = 1;
}
if ((local->iw_mode == IW_MODE_MASTER ||
local->iw_mode == IW_MODE_REPEAT) &&
!from_assoc_ap) {
switch (hostap_handle_sta_rx(local, dev, skb, rx_stats,
wds != NULL)) {
case AP_RX_CONTINUE_NOT_AUTHORIZED:
frame_authorized = 0;
break;
case AP_RX_CONTINUE:
frame_authorized = 1;
break;
case AP_RX_DROP:
goto rx_dropped;
case AP_RX_EXIT:
goto rx_exit;
}
}
/* Nullfunc frames may have PS-bit set, so they must be passed to
* hostap_handle_sta_rx() before being dropped here. */
if (stype != IEEE80211_STYPE_DATA &&
stype != IEEE80211_STYPE_DATA_CFACK &&
stype != IEEE80211_STYPE_DATA_CFPOLL &&
stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
if (stype != IEEE80211_STYPE_NULLFUNC)
printk(KERN_DEBUG "%s: RX: dropped data frame "
"with no data (type=0x%02x, subtype=0x%02x)\n",
dev->name, type >> 2, stype >> 4);
goto rx_dropped;
}
/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
(keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
goto rx_dropped;
hdr = (struct ieee80211_hdr *) skb->data;
/* skb: hdr + (possibly fragmented) plaintext payload */
if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
(frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
int flen;
struct sk_buff *frag_skb =
prism2_frag_cache_get(local, hdr);
if (!frag_skb) {
printk(KERN_DEBUG "%s: Rx cannot get skb from "
"fragment cache (morefrag=%d seq=%u frag=%u)\n",
dev->name, (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
(sc & IEEE80211_SCTL_SEQ) >> 4, frag);
goto rx_dropped;
}
flen = skb->len;
if (frag != 0)
flen -= hdrlen;
if (frag_skb->tail + flen > frag_skb->end) {
printk(KERN_WARNING "%s: host decrypted and "
"reassembled frame did not fit skb\n",
dev->name);
prism2_frag_cache_invalidate(local, hdr);
goto rx_dropped;
}
if (frag == 0) {
/* copy first fragment (including full headers) into
* beginning of the fragment cache skb */
skb_copy_from_linear_data(skb, skb_put(frag_skb, flen),
flen);
} else {
/* append frame payload to the end of the fragment
* cache skb */
skb_copy_from_linear_data_offset(skb, hdrlen,
skb_put(frag_skb,
flen), flen);
}
dev_kfree_skb(skb);
skb = NULL;
if (fc & IEEE80211_FCTL_MOREFRAGS) {
/* more fragments expected - leave the skb in fragment
* cache for now; it will be delivered to upper layers
* after all fragments have been received */
goto rx_exit;
}
/* this was the last fragment and the frame will be
* delivered, so remove skb from fragment cache */
skb = frag_skb;
hdr = (struct ieee80211_hdr *) skb->data;
prism2_frag_cache_invalidate(local, hdr);
}
/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
* encrypted/authenticated */
if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
goto rx_dropped;
hdr = (struct ieee80211_hdr *) skb->data;
if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) {
if (local->ieee_802_1x &&
hostap_is_eapol_frame(local, skb)) {
/* pass unencrypted EAPOL frames even if encryption is
* configured */
PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X - passing "
"unencrypted EAPOL frame\n", local->dev->name);
} else {
printk(KERN_DEBUG "%s: encryption configured, but RX "
"frame not encrypted (SA=%pM)\n",
local->dev->name, hdr->addr2);
goto rx_dropped;
}
}
if (local->drop_unencrypted && !(fc & IEEE80211_FCTL_PROTECTED) &&
!hostap_is_eapol_frame(local, skb)) {
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: dropped unencrypted RX data "
"frame from %pM (drop_unencrypted=1)\n",
dev->name, hdr->addr2);
}
goto rx_dropped;
}
/* skb: hdr + (possible reassembled) full plaintext payload */
payload = skb->data + hdrlen;
ethertype = (payload[6] << 8) | payload[7];
/* If IEEE 802.1X is used, check whether the port is authorized to send
* the received frame. */
if (local->ieee_802_1x && local->iw_mode == IW_MODE_MASTER) {
if (ethertype == ETH_P_PAE) {
PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X frame\n",
dev->name);
if (local->hostapd && local->apdev) {
/* Send IEEE 802.1X frames to the user
* space daemon for processing */
prism2_rx_80211(local->apdev, skb, rx_stats,
PRISM2_RX_MGMT);
local->apdevstats.rx_packets++;
local->apdevstats.rx_bytes += skb->len;
goto rx_exit;
}
} else if (!frame_authorized) {
printk(KERN_DEBUG "%s: dropped frame from "
"unauthorized port (IEEE 802.1X): "
"ethertype=0x%04x\n",
dev->name, ethertype);
goto rx_dropped;
}
}
/* convert hdr + possible LLC headers into Ethernet header */
if (skb->len - hdrlen >= 8 &&
((memcmp(payload, rfc1042_header, 6) == 0 &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
memcmp(payload, bridge_tunnel_header, 6) == 0)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
skb_pull(skb, hdrlen + 6);
memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
} else {
__be16 len;
/* Leave Ethernet header part of hdr and full payload */
skb_pull(skb, hdrlen);
len = htons(skb->len);
memcpy(skb_push(skb, 2), &len, 2);
memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
}
if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_TODS) &&
skb->len >= ETH_HLEN + ETH_ALEN) {
/* Non-standard frame: get addr4 from its bogus location after
* the payload */
skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN,
skb->data + ETH_ALEN,
ETH_ALEN);
skb_trim(skb, skb->len - ETH_ALEN);
}
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
if (local->iw_mode == IW_MODE_MASTER && !wds &&
local->ap->bridge_packets) {
if (dst[0] & 0x01) {
/* copy multicast frame both to the higher layers and
* to the wireless media */
local->ap->bridged_multicast++;
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2 == NULL)
printk(KERN_DEBUG "%s: skb_clone failed for "
"multicast frame\n", dev->name);
} else if (hostap_is_sta_authorized(local->ap, dst)) {
/* send frame directly to the associated STA using
* wireless media and not passing to higher layers */
local->ap->bridged_unicast++;
skb2 = skb;
skb = NULL;
}
}
if (skb2 != NULL) {
/* send to wireless media */
skb2->dev = dev;
skb2->protocol = cpu_to_be16(ETH_P_802_3);
skb_reset_mac_header(skb2);
skb_reset_network_header(skb2);
/* skb2->network_header += ETH_HLEN; */
dev_queue_xmit(skb2);
}
if (skb) {
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
}
rx_exit:
if (sta)
hostap_handle_sta_release(sta);
return;
rx_dropped:
dev_kfree_skb(skb);
dev->stats.rx_dropped++;
goto rx_exit;
}
EXPORT_SYMBOL(hostap_80211_rx);
| gpl-2.0 |
boa19861105/BOA-A4TW | drivers/media/video/videobuf-vmalloc.c | 8311 | 8530 | /*
* helper functions for vmalloc video4linux capture buffers
*
* The functions expect the hardware being able to scatter gather
* (i.e. the buffers are not linear in physical memory, but fragmented
* into PAGE_SIZE chunks). They also assume the driver does not need
* to touch the video data.
*
* (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <media/videobuf-vmalloc.h>
#define MAGIC_DMABUF 0x17760309
#define MAGIC_VMAL_MEM 0x18221223
#define MAGIC_CHECK(is, should) \
if (unlikely((is) != (should))) { \
printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
is, should); \
BUG(); \
}
static int debug;
module_param(debug, int, 0644);
MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
#define dprintk(level, fmt, arg...) \
if (debug >= level) \
printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
/***************************************************************************/
/* VMA open handler: one more userspace mapping references this buffer map. */
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *m = vma->vm_private_data;

	dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", m,
		m->count, vma->vm_start, vma->vm_end);
	m->count++;
}
/*
 * VMA close handler: drop one reference on the mapping.  When the last
 * userspace mapping disappears, cancel any active streaming and free the
 * vmalloc'ed memory that backed the mapped buffers.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
		map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_vmalloc_memory *mem;

		dprintk(1, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		/* Detach every buffer that used this mapping. */
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */
				MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dprintk(1, "%s: buf[%d] freeing (%p)\n",
					__func__, i, mem->vaddr);

				vfree(mem->vaddr);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}

	return;
}
/* VM operations installed on buffer mmap()s; reference-counts the mapping. */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
/* ---------------------------------------------------------------------
* vmalloc handlers for the generic methods
*/
/* Allocated area consists on 3 parts:
struct video_buffer
struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
struct videobuf_dma_sg_memory
*/
/*
 * Allocate a videobuf_buffer (plus driver-private area of @size bytes)
 * with our vmalloc-specific private data appended at the end.
 */
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
	struct videobuf_buffer *vb;
	struct videobuf_vmalloc_memory *mem;

	/* One allocation holds the buffer struct followed by our priv data. */
	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb == NULL)
		return NULL;

	mem = (struct videobuf_vmalloc_memory *)((char *)vb + size);
	vb->priv = mem;
	mem->magic = MAGIC_VMAL_MEM;

	dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
		__func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
		mem, (long)sizeof(*mem));

	return vb;
}
/*
 * Prepare buffer memory for I/O according to the queue memory model.
 * MMAP buffers must already have been allocated by __videobuf_mmap_mapper();
 * USERPTR is only supported in the kernel-bounce form used by read().
 * Returns 0 on success or a negative errno.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_vmalloc_memory *mem = vb->priv;
	int pages;

	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dprintk(1, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			printk(KERN_ERR "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		pages = PAGE_ALIGN(vb->size);

		dprintk(1, "%s memory method USERPTR\n", __func__);

		/* a non-zero baddr means a real userspace pointer, which
		 * this backend cannot remap yet */
		if (vb->baddr) {
			printk(KERN_ERR "USERPTR is currently not supported\n");
			return -EINVAL;
		}

		/* The only USERPTR currently supported is the one needed for
		 * read() method.
		 */
		mem->vaddr = vmalloc_user(pages);
		if (!mem->vaddr) {
			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
			return -ENOMEM;
		}
		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
			mem->vaddr, pages);

#if 0
		int rc;
		/* Kernel userptr is used also by read() method. In this case,
		   there's no need to remap, since data will be copied to user
		 */
		if (!vb->baddr)
			return 0;

		/* FIXME: to properly support USERPTR, remap should occur.
		   The code below won't work, since mem->vma = NULL
		 */
		/* Try to remap memory */
		rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
		if (rc < 0) {
			printk(KERN_ERR "mmap: remap failed with error %d", rc);
			return -ENOMEM;
		}
#endif

		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);

		/* Currently, doesn't support V4L2_MEMORY_OVERLAY */
		printk(KERN_ERR "Memory method currently unsupported.\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * mmap() backend: allocate vmalloc memory for @buf and remap it into the
 * caller's VMA.  Installs videobuf_vm_ops so the mapping is reference
 * counted.  Returns 0 on success or -ENOMEM.
 *
 * Fixes vs. previous version: on failure, buf->map no longer points at
 * the freed mapping (use-after-free hazard for later mmap/close paths),
 * and mem->vaddr is cleared after vfree() instead of being left dangling.
 */
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_vmalloc_memory *mem;
	struct videobuf_mapping *map;
	int retval, pages;

	dprintk(1, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (NULL == map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
	mem->vaddr = vmalloc_user(pages);
	if (!mem->vaddr) {
		printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
		goto error;
	}
	dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);

	/* Try to remap memory */
	retval = remap_vmalloc_range(vma, mem->vaddr, 0);
	if (retval < 0) {
		printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
		vfree(mem->vaddr);
		mem->vaddr = NULL;	/* don't leave a stale pointer behind */
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_private_data = map;

	dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize,
		vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	/* Undo partial setup: buf->map must not point at freed memory. */
	buf->map = NULL;
	kfree(map);
	return -ENOMEM;
}
/* videobuf core callbacks implementing the vmalloc memory backend. */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc_vb,
	.iolock = __videobuf_iolock,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = videobuf_to_vmalloc,
};
/*
 * Initialize @q to use the vmalloc memory backend: thin wrapper that
 * forwards all arguments to the core init with this file's qops table.
 */
void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
struct videobuf_vmalloc_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
return mem->vaddr;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
/*
 * Free buffer memory allocated for the read() path only.
 *
 * mmapped memory can't be freed here, otherwise the mmapped region
 * would be released while still needed; that release happens inside
 * videobuf_vm_close().  So memory is freed only when it was allocated
 * for a kernel-bounce USERPTR (read()) buffer.
 */
void videobuf_vmalloc_free(struct videobuf_buffer *buf)
{
	struct videobuf_vmalloc_memory *priv = buf->priv;

	if (priv == NULL)
		return;

	/* only the read()-style USERPTR buffers (baddr == 0) are ours */
	if (buf->memory != V4L2_MEMORY_USERPTR || buf->baddr)
		return;

	MAGIC_CHECK(priv->magic, MAGIC_VMAL_MEM);

	vfree(priv->vaddr);
	priv->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
| gpl-2.0 |
mrimp/S5_NewSon_Kernel | drivers/acpi/utils.c | 9079 | 9468 | /*
* acpi_utils.c - ACPI Utility Functions ($Revision: 10 $)
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include "internal.h"
#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME("utils");
/* --------------------------------------------------------------------------
Object Evaluation Helpers
-------------------------------------------------------------------------- */
/*
 * Log a failed ACPI evaluation: full pathname of @h plus relative path @p
 * and the decoded status @s.  Compiles to a no-op unless ACPI_DEBUG_OUTPUT
 * is enabled.
 */
static void
acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
{
#ifdef ACPI_DEBUG_OUTPUT
	char prefix[80] = {'\0'};
	struct acpi_buffer buffer = {sizeof(prefix), prefix};
	acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",
			  (char *) prefix, p, acpi_format_exception(s)));
#else
	return;
#endif
}
acpi_status
acpi_extract_package(union acpi_object *package,
struct acpi_buffer *format, struct acpi_buffer *buffer)
{
u32 size_required = 0;
u32 tail_offset = 0;
char *format_string = NULL;
u32 format_count = 0;
u32 i = 0;
u8 *head = NULL;
u8 *tail = NULL;
if (!package || (package->type != ACPI_TYPE_PACKAGE)
|| (package->package.count < 1)) {
printk(KERN_WARNING PREFIX "Invalid package argument\n");
return AE_BAD_PARAMETER;
}
if (!format || !format->pointer || (format->length < 1)) {
printk(KERN_WARNING PREFIX "Invalid format argument\n");
return AE_BAD_PARAMETER;
}
if (!buffer) {
printk(KERN_WARNING PREFIX "Invalid buffer argument\n");
return AE_BAD_PARAMETER;
}
format_count = (format->length / sizeof(char)) - 1;
if (format_count > package->package.count) {
printk(KERN_WARNING PREFIX "Format specifies more objects [%d]"
" than exist in package [%d].\n",
format_count, package->package.count);
return AE_BAD_DATA;
}
format_string = format->pointer;
/*
* Calculate size_required.
*/
for (i = 0; i < format_count; i++) {
union acpi_object *element = &(package->package.elements[i]);
if (!element) {
return AE_BAD_DATA;
}
switch (element->type) {
case ACPI_TYPE_INTEGER:
switch (format_string[i]) {
case 'N':
size_required += sizeof(u64);
tail_offset += sizeof(u64);
break;
case 'S':
size_required +=
sizeof(char *) + sizeof(u64) +
sizeof(char);
tail_offset += sizeof(char *);
break;
default:
printk(KERN_WARNING PREFIX "Invalid package element"
" [%d]: got number, expecing"
" [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
break;
}
break;
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
switch (format_string[i]) {
case 'S':
size_required +=
sizeof(char *) +
(element->string.length * sizeof(char)) +
sizeof(char);
tail_offset += sizeof(char *);
break;
case 'B':
size_required +=
sizeof(u8 *) +
(element->buffer.length * sizeof(u8));
tail_offset += sizeof(u8 *);
break;
default:
printk(KERN_WARNING PREFIX "Invalid package element"
" [%d] got string/buffer,"
" expecing [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
break;
}
break;
case ACPI_TYPE_PACKAGE:
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Found unsupported element at index=%d\n",
i));
/* TBD: handle nested packages... */
return AE_SUPPORT;
break;
}
}
/*
* Validate output buffer.
*/
if (buffer->length < size_required) {
buffer->length = size_required;
return AE_BUFFER_OVERFLOW;
} else if (buffer->length != size_required || !buffer->pointer) {
return AE_BAD_PARAMETER;
}
head = buffer->pointer;
tail = buffer->pointer + tail_offset;
/*
* Extract package data.
*/
for (i = 0; i < format_count; i++) {
u8 **pointer = NULL;
union acpi_object *element = &(package->package.elements[i]);
if (!element) {
return AE_BAD_DATA;
}
switch (element->type) {
case ACPI_TYPE_INTEGER:
switch (format_string[i]) {
case 'N':
*((u64 *) head) =
element->integer.value;
head += sizeof(u64);
break;
case 'S':
pointer = (u8 **) head;
*pointer = tail;
*((u64 *) tail) =
element->integer.value;
head += sizeof(u64 *);
tail += sizeof(u64);
/* NULL terminate string */
*tail = (char)0;
tail += sizeof(char);
break;
default:
/* Should never get here */
break;
}
break;
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
switch (format_string[i]) {
case 'S':
pointer = (u8 **) head;
*pointer = tail;
memcpy(tail, element->string.pointer,
element->string.length);
head += sizeof(char *);
tail += element->string.length * sizeof(char);
/* NULL terminate string */
*tail = (char)0;
tail += sizeof(char);
break;
case 'B':
pointer = (u8 **) head;
*pointer = tail;
memcpy(tail, element->buffer.pointer,
element->buffer.length);
head += sizeof(u8 *);
tail += element->buffer.length * sizeof(u8);
break;
default:
/* Should never get here */
break;
}
break;
case ACPI_TYPE_PACKAGE:
/* TBD: handle nested packages... */
default:
/* Should never get here */
break;
}
}
return AE_OK;
}
EXPORT_SYMBOL(acpi_extract_package);
/*
 * Evaluate an ACPI object expected to yield a single integer and store
 * the value in *data.  Returns AE_OK on success, an ACPI error otherwise.
 */
acpi_status
acpi_evaluate_integer(acpi_handle handle,
		      acpi_string pathname,
		      struct acpi_object_list *arguments, unsigned long long *data)
{
	union acpi_object obj;
	struct acpi_buffer result = { sizeof(union acpi_object), &obj };
	acpi_status status;

	if (!data)
		return AE_BAD_PARAMETER;

	status = acpi_evaluate_object(handle, pathname, arguments, &result);
	if (ACPI_FAILURE(status)) {
		acpi_util_eval_error(handle, pathname, status);
		return status;
	}

	if (obj.type != ACPI_TYPE_INTEGER) {
		acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
		return AE_BAD_DATA;
	}

	*data = obj.integer.value;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data));
	return AE_OK;
}
EXPORT_SYMBOL(acpi_evaluate_integer);
/*
 * Evaluate an ACPI object expected to return a package of local
 * references and fill @list with the referenced handles.  On any
 * failure list->count is 0.
 *
 * Fixes vs. previous version: the ACPI_MAX_HANDLES overflow check
 * returned directly, leaking the ACPI_ALLOCATE_BUFFER result
 * (buffer.pointer) — it now goes through the common cleanup path.
 * Also removed commented-out dead code.
 */
acpi_status
acpi_evaluate_reference(acpi_handle handle,
			acpi_string pathname,
			struct acpi_object_list *arguments,
			struct acpi_handle_list *list)
{
	acpi_status status = AE_OK;
	union acpi_object *package = NULL;
	union acpi_object *element = NULL;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	u32 i = 0;

	if (!list) {
		return AE_BAD_PARAMETER;
	}

	/* Evaluate object. */

	status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
	if (ACPI_FAILURE(status))
		goto end;

	package = buffer.pointer;

	if ((buffer.length == 0) || !package) {
		printk(KERN_ERR PREFIX "No return object (len %X ptr %p)\n",
		       (unsigned)buffer.length, package);
		status = AE_BAD_DATA;
		acpi_util_eval_error(handle, pathname, status);
		goto end;
	}
	if (package->type != ACPI_TYPE_PACKAGE) {
		printk(KERN_ERR PREFIX "Expecting a [Package], found type %X\n",
		       package->type);
		status = AE_BAD_DATA;
		acpi_util_eval_error(handle, pathname, status);
		goto end;
	}
	if (!package->package.count) {
		printk(KERN_ERR PREFIX "[Package] has zero elements (%p)\n",
		       package);
		status = AE_BAD_DATA;
		acpi_util_eval_error(handle, pathname, status);
		goto end;
	}

	if (package->package.count > ACPI_MAX_HANDLES) {
		/* too many references for the fixed-size handle list;
		 * go through "end" so buffer.pointer is freed */
		status = AE_NO_MEMORY;
		goto end;
	}
	list->count = package->package.count;

	/* Extract package data. */

	for (i = 0; i < list->count; i++) {

		element = &(package->package.elements[i]);

		if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
			status = AE_BAD_DATA;
			printk(KERN_ERR PREFIX
			       "Expecting a [Reference] package element, found type %X\n",
			       element->type);
			acpi_util_eval_error(handle, pathname, status);
			break;
		}

		if (!element->reference.handle) {
			printk(KERN_WARNING PREFIX "Invalid reference in"
			       " package %s\n", pathname);
			status = AE_NULL_ENTRY;
			break;
		}

		/* Get the acpi_handle. */

		list->handles[i] = element->reference.handle;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found reference [%p]\n",
				  list->handles[i]));
	}

      end:
	if (ACPI_FAILURE(status)) {
		list->count = 0;
	}

	kfree(buffer.pointer);

	return status;
}
EXPORT_SYMBOL(acpi_evaluate_reference);
| gpl-2.0 |
MassStash/htc_m8_kernel_GPE_6.0 | drivers/isdn/hysdn/hysdn_sched.c | 9847 | 7095 | /* $Id: hysdn_sched.c,v 1.5.6.4 2001/11/06 21:58:19 kai Exp $
*
* Linux driver for HYSDN cards
* scheduler routines for handling exchange card <-> pc.
*
* Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
* Copyright 1999 by Werner Cornelius (werner@titro.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "hysdn_defs.h"
/*****************************************************************************/
/* hysdn_sched_rx is called from the cards handler to announce new data is */
/* available from the card. The routine has to handle the data and return */
/* with a nonzero code if the data could be worked (or even thrown away), if */
/* no room to buffer the data is available a zero return tells the card */
/* to keep the data until later. */
/*****************************************************************************/
/* Dispatch data received from the card according to its channel number.
 * Always returns 1 (data handled or deliberately discarded). */
int
hysdn_sched_rx(hysdn_card *card, unsigned char *buf, unsigned short len,
	       unsigned short chan)
{
	if (chan == CHAN_NDIS_DATA) {
		/* network data: hand to the net handler when enabled */
		if (hynet_enable & (1 << card->myid))
			hysdn_rx_netpkt(card, buf, len);
	} else if (chan == CHAN_ERRLOG) {
		hysdn_card_errlog(card, (tErrLogEntry *) buf, len);
		/* restart fetching if error logging is still active */
		if (card->err_log_state == ERRLOG_STATE_ON)
			card->err_log_state = ERRLOG_STATE_START;
#ifdef CONFIG_HYSDN_CAPI
	} else if (chan == CHAN_CAPI) {
		/* CAPI data: hand to the CAPI handler when enabled */
		if (hycapi_enable & (1 << card->myid))
			hycapi_rx_capipkt(card, buf, len);
#endif /* CONFIG_HYSDN_CAPI */
	} else {
		printk(KERN_INFO "irq message channel %d len %d unhandled \n", chan, len);
	}

	return (1);
} /* hysdn_sched_rx */
/*****************************************************************************/
/* hysdn_sched_tx is called from the cards handler to announce that there is */
/* room in the tx-buffer to the card and data may be sent if needed. */
/* If the routine wants to send data it must fill buf, len and chan with the */
/* appropriate data and return a nonzero value. With a zero return no new */
/* data to send is assumed. maxlen specifies the buffer size available for */
/* sending. */
/*****************************************************************************/
/*
 * Fill *buf/*len/*chan with the next frame to send to the card, if any.
 * Returns 1 when data was placed in buf, 0 when nothing is pending.
 * TX sources are serviced in fixed priority order: async config requests,
 * error-log start/stop commands, network packets, then CAPI packets.
 */
int
hysdn_sched_tx(hysdn_card *card, unsigned char *buf,
	       unsigned short volatile *len, unsigned short volatile *chan,
	       unsigned short maxlen)
{
	struct sk_buff *skb;

	if (card->net_tx_busy) {
		card->net_tx_busy = 0;	/* reset flag */
		hysdn_tx_netack(card);	/* acknowledge packet send */
	}			/* a network packet has completely been transferred */

	/* first of all async requests are handled */
	if (card->async_busy) {
		if (card->async_len <= maxlen) {
			memcpy(buf, card->async_data, card->async_len);
			*len = card->async_len;
			*chan = card->async_channel;
			card->async_busy = 0;	/* reset request */
			return (1);
		}
		card->async_busy = 0;	/* in case of length error */
	}			/* async request */

	if ((card->err_log_state == ERRLOG_STATE_START) &&
	    (maxlen >= ERRLOG_CMD_REQ_SIZE)) {
		strcpy(buf, ERRLOG_CMD_REQ);	/* copy the command */
		*len = ERRLOG_CMD_REQ_SIZE;	/* buffer length */
		*chan = CHAN_ERRLOG;	/* and channel */
		card->err_log_state = ERRLOG_STATE_ON;	/* new state is on */
		return (1);	/* tell that data should be send */
	}			/* error log start and able to send */

	if ((card->err_log_state == ERRLOG_STATE_STOP) &&
	    (maxlen >= ERRLOG_CMD_STOP_SIZE)) {
		strcpy(buf, ERRLOG_CMD_STOP);	/* copy the command */
		*len = ERRLOG_CMD_STOP_SIZE;	/* buffer length */
		*chan = CHAN_ERRLOG;	/* and channel */
		card->err_log_state = ERRLOG_STATE_OFF;	/* new state is off */
		return (1);	/* tell that data should be send */
	}			/* error log start and able to send */

	/* now handle network interface packets */
	if ((hynet_enable & (1 << card->myid)) &&
	    (skb = hysdn_tx_netget(card)) != NULL)
	{
		if (skb->len <= maxlen) {
			/* copy the packet to the buffer */
			skb_copy_from_linear_data(skb, buf, skb->len);
			*len = skb->len;
			*chan = CHAN_NDIS_DATA;
			card->net_tx_busy = 1;	/* we are busy sending network data */
			return (1);	/* go and send the data */
		} else
			hysdn_tx_netack(card);	/* aknowledge packet -> throw away */
	}			/* send a network packet if available */
#ifdef CONFIG_HYSDN_CAPI
	if (((hycapi_enable & (1 << card->myid))) &&
	    ((skb = hycapi_tx_capiget(card)) != NULL))
	{
		if (skb->len <= maxlen) {
			skb_copy_from_linear_data(skb, buf, skb->len);
			*len = skb->len;
			*chan = CHAN_CAPI;
			hycapi_tx_capiack(card);
			return (1);	/* go and send the data */
		}
	}
#endif /* CONFIG_HYSDN_CAPI */
	return (0);		/* nothing to send */
}			/* hysdn_sched_tx */
/*****************************************************************************/
/* send one config line to the card and return 0 if successful, otherwise a */
/* negative error code. */
/* The function works with timeouts perhaps not giving the greatest speed */
/* sending the line, but this should be meaningless because only some lines */
/* are to be sent and this happens very seldom. */
/*****************************************************************************/
/*
 * Send one config line to the card via the async buffer and wait (polling,
 * 20 ms steps) until the card has consumed it.  Returns 0 on success or
 * -ERR_ASYNC_TIME on timeout.  May sleep; must not be called from atomic
 * context.
 */
int
hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
{
	int cnt = 50;		/* timeout intervalls */
	unsigned long flags;

	if (card->debug_flags & LOG_SCHED_ASYN)
		hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1);

	/* wait for a previous async request to finish */
	while (card->async_busy) {

		if (card->debug_flags & LOG_SCHED_ASYN)
			hysdn_addlog(card, "async tx-cfg delayed");

		msleep_interruptible(20);		/* Timeout 20ms */
		if (!--cnt)
			return (-ERR_ASYNC_TIME);	/* timed out */
	}			/* wait for buffer to become free */

	spin_lock_irqsave(&card->hysdn_lock, flags);
	strcpy(card->async_data, line);
	card->async_len = strlen(line) + 1;
	card->async_channel = chan;
	card->async_busy = 1;	/* request transfer */

	/* now queue the task */
	schedule_work(&card->irq_queue);
	spin_unlock_irqrestore(&card->hysdn_lock, flags);

	if (card->debug_flags & LOG_SCHED_ASYN)
		hysdn_addlog(card, "async tx-cfg data queued");

	cnt++;			/* short delay */

	/* now poll until the queued request has actually been sent */
	while (card->async_busy) {

		if (card->debug_flags & LOG_SCHED_ASYN)
			hysdn_addlog(card, "async tx-cfg waiting for tx-ready");

		msleep_interruptible(20);		/* Timeout 20ms */
		if (!--cnt)
			return (-ERR_ASYNC_TIME);	/* timed out */
	}			/* wait for buffer to become free again */

	if (card->debug_flags & LOG_SCHED_ASYN)
		hysdn_addlog(card, "async tx-cfg data send");

	return (0);		/* line send correctly */
}			/* hysdn_tx_cfgline */
| gpl-2.0 |
bsmitty83/B-Team4.3 | drivers/net/wireless/wl1251/tx.c | 10871 | 13649 | /*
* This file is part of wl1251
*
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "wl1251.h"
#include "reg.h"
#include "tx.h"
#include "ps.h"
#include "io.h"
/*
 * Check whether the firmware's TX double buffer is full.
 * @data_out_count: frames the firmware has consumed (wrapping counter).
 * Returns true when all DP_TX_PACKET_RING_CHUNK_NUM slots are in use.
 *
 * Improvement: collapsed the `if (...) return true; else return false;`
 * tail into a direct boolean return (idiomatic, behavior unchanged).
 */
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
	int used, data_in_count;

	data_in_count = wl->data_in_count;

	if (data_in_count < data_out_count)
		/* data_in_count has wrapped */
		data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

	used = data_in_count - data_out_count;

	WARN_ON(used < 0);
	WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);

	return used >= DP_TX_PACKET_RING_CHUNK_NUM;
}
/* Return 0 when the TX double buffer can accept a frame, -EBUSY otherwise. */
static int wl1251_tx_path_status(struct wl1251 *wl)
{
	u32 status;

	status = wl1251_mem_read32(wl, wl->data_path->tx_control_addr);

	if (wl1251_tx_double_buffer_busy(wl,
					 status & TX_STATUS_DATA_OUT_COUNT_MASK))
		return -EBUSY;

	return 0;
}
/* Claim a free TX frame slot for @skb; returns its index, or -EBUSY. */
static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
{
	int slot;

	for (slot = 0; slot < FW_TX_CMPLT_BLOCK_SIZE; slot++) {
		if (wl->tx_frames[slot] != NULL)
			continue;
		wl->tx_frames[slot] = skb;
		return slot;
	}

	return -EBUSY;
}
/*
 * Fill the TX descriptor's control bitfields from the mac80211 tx info
 * (@control) and the 802.11 frame control field (@fc).
 */
static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
			      struct ieee80211_tx_info *control, u16 fc)
{
	/* clear all control bits in one store before setting fields */
	*(u16 *)&tx_hdr->control = 0;

	tx_hdr->control.rate_policy = 0;

	/* 802.11 packets */
	tx_hdr->control.packet_type = 0;

	/* NOTE(review): ack_policy=1 presumably means "no ACK expected";
	 * confirm against the firmware interface definition. */
	if (control->flags & IEEE80211_TX_CTL_NO_ACK)
		tx_hdr->control.ack_policy = 1;

	/* request a TX-complete notification from the firmware */
	tx_hdr->control.tx_complete = 1;

	/* mark QoS data and QoS null-function data frames */
	if ((fc & IEEE80211_FTYPE_DATA) &&
	    ((fc & IEEE80211_STYPE_QOS_DATA) ||
	     (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
		tx_hdr->control.qos = 1;
}
/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
#define MAX_MSDU_SECURITY_LENGTH 16
#define MAX_MPDU_SECURITY_LENGTH 16
#define WLAN_QOS_HDR_LEN 26
#define MAX_MPDU_HEADER_AND_SECURITY (MAX_MPDU_SECURITY_LENGTH + \
WLAN_QOS_HDR_LEN)
#define HW_BLOCK_SIZE 252
/*
 * Compute how many HW_BLOCK_SIZE memory blocks the firmware needs for
 * this frame, accounting for 802.11 fragmentation and worst-case
 * per-MPDU security overhead, and store the result in the descriptor.
 */
static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
	u16 payload_len, frag_threshold, mem_blocks;
	u16 num_mpdus, mem_blocks_per_frag;

	frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);

	/* worst-case payload: frame length plus MSDU security overhead */
	payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH;

	if (payload_len > frag_threshold) {
		/* frame will be fragmented: blocks needed per full fragment
		 * (fragment + per-MPDU header/security, rounded up) */
		mem_blocks_per_frag =
			((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
			 HW_BLOCK_SIZE) + 1;
		num_mpdus = payload_len / frag_threshold;
		mem_blocks = num_mpdus * mem_blocks_per_frag;
		payload_len -= num_mpdus * frag_threshold;
		num_mpdus++;

	} else {
		mem_blocks_per_frag = 0;
		mem_blocks = 0;
		num_mpdus = 1;
	}

	/* blocks for the remaining (last / only) fragment, rounded up */
	mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;

	if (num_mpdus > 1)
		mem_blocks += min(num_mpdus, mem_blocks_per_frag);

	tx_hdr->num_mem_blocks = mem_blocks;
}
/*
 * wl1251_tx_fill_hdr - prepend and populate the firmware TX descriptor
 *
 * Reserves a slot in wl->tx_frames for @skb, pushes a
 * struct tx_double_buffer_desc in front of the 802.11 frame and fills
 * in its length, rate, expiry, id, queue and control fields.
 *
 * Returns 0 on success, -EINVAL for a NULL skb, or -EBUSY from
 * wl1251_tx_id() when no descriptor slot is free.
 */
static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
			      struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	struct ieee80211_rate *rate;
	int id;
	u16 fc;

	if (!skb)
		return -EINVAL;

	id = wl1251_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* Frame control field, read before the descriptor is pushed in
	 * front of the 802.11 header.
	 * NOTE(review): read as raw u16 straight from the frame --
	 * verify endianness assumptions on big-endian targets. */
	fc = *(u16 *)skb->data;
	tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb,
							   sizeof(*tx_hdr));

	/* Payload length excludes the descriptor itself. */
	tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
	rate = ieee80211_get_tx_rate(wl->hw, control);
	tx_hdr->rate = cpu_to_le16(rate->hw_value);
	tx_hdr->expiry_time = cpu_to_le32(1 << 16);
	tx_hdr->id = id;

	tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));

	wl1251_tx_control(tx_hdr, control, fc);
	wl1251_tx_frag_block_num(tx_hdr);

	return 0;
}
/* We copy the packet to the target */
/*
 * wl1251_tx_send_packet - copy a descriptor-prefixed frame to the device
 *
 * Opens the TKIP IV gap when hardware TKIP encryption is in use,
 * re-aligns the buffer to the 4-byte boundary required by the device
 * DMA (copying the skb if it cannot be shifted in place), and writes
 * the result into the current half of the double-buffered TX packet
 * ring.  Returns 0 on success or -EINVAL on a NULL skb / failed skb
 * allocation.
 */
static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
				 struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	int len;
	u32 addr;

	if (!skb)
		return -EINVAL;

	tx_hdr = (struct tx_double_buffer_desc *) skb->data;

	if (control->control.hw_key &&
	    control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen;
		__le16 fc;
		u16 length;
		u8 *pos;

		/* Reserve WL1251_TKIP_IV_SPACE after the 802.11 header
		 * for the hardware-inserted IV; the descriptor length
		 * must include it. */
		fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
		length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE;
		tx_hdr->length = cpu_to_le16(length);

		hdrlen = ieee80211_hdrlen(fc);

		pos = skb_push(skb, WL1251_TKIP_IV_SPACE);
		/* Move descriptor + MAC header down to the new buffer
		 * start, leaving the gap in front of the payload. */
		memmove(pos, pos + WL1251_TKIP_IV_SPACE,
			sizeof(*tx_hdr) + hdrlen);
	}

	/* Revisit. This is a workaround for getting non-aligned packets.
	   This happens at least with EAPOL packets from the user space.
	   Our DMA requires packets to be aligned on a 4-byte boundary.
	*/
	if (unlikely((long)skb->data & 0x03)) {
		int offset = (4 - (long)skb->data) & 0x03;
		wl1251_debug(DEBUG_TX, "skb offset %d", offset);

		/* check whether the current skb can be used */
		if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
			/* Need a private, expandable copy; 3 bytes of
			 * tailroom covers the worst-case shift. */
			struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
								 GFP_KERNEL);
			if (unlikely(newskb == NULL)) {
				wl1251_error("Can't allocate skb!");
				return -EINVAL;
			}

			tx_hdr = (struct tx_double_buffer_desc *) newskb->data;

			dev_kfree_skb_any(skb);
			/* Keep the in-flight table pointing at the live
			 * skb so completion finds the right buffer. */
			wl->tx_frames[tx_hdr->id] = skb = newskb;

			offset = (4 - (long)skb->data) & 0x03;
			wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
		}

		/* align the buffer on a 4-byte boundary */
		if (offset) {
			unsigned char *src = skb->data;
			skb_reserve(skb, offset);
			memmove(skb->data, src, skb->len);
			tx_hdr = (struct tx_double_buffer_desc *) skb->data;
		}
	}

	/* Our skb->data at this point includes the HW header */
	len = WL1251_TX_ALIGN(skb->len);

	/* Select the half of the double buffer that is currently ours. */
	if (wl->data_in_count & 0x1)
		addr = wl->data_path->tx_packet_ring_addr +
			wl->data_path->tx_packet_ring_chunk_size;
	else
		addr = wl->data_path->tx_packet_ring_addr;

	wl1251_mem_write(wl, addr, skb->data, len);

	wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
		     "queue %d", tx_hdr->id, skb, tx_hdr->length,
		     tx_hdr->rate, tx_hdr->xmit_queue);

	return 0;
}
/*
 * wl1251_tx_trigger - kick the firmware to process the queued frame
 *
 * Raises the TX-process interrupt matching the double-buffer half that
 * was just written (PROC1 for the odd half, PROC0 for the even half),
 * then advances data_in_count to flip to the other half.
 */
static void wl1251_tx_trigger(struct wl1251 *wl)
{
	if (wl->data_in_count & 0x1)
		wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG_H,
				   INTR_TRIG_TX_PROC1);
	else
		wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG,
				   INTR_TRIG_TX_PROC0);

	/* Bumping data in */
	wl->data_in_count = (wl->data_in_count + 1) &
			    TX_STATUS_DATA_OUT_COUNT_MASK;
}
/* caller must hold wl->mutex */
/*
 * wl1251_tx_frame - push one frame through the whole TX pipeline
 *
 * Ensures the firmware default key matches the frame's hardware key,
 * checks double-buffer availability, builds the TX descriptor, copies
 * the frame to device memory and triggers the firmware.  Returns 0 on
 * success or a negative error (-EBUSY when the TX path is full).
 */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int err;

	/* Switch the firmware default key if this frame uses another. */
	if (info->control.hw_key) {
		u8 key_idx = info->control.hw_key->hw_key_idx;

		if (unlikely(wl->default_key != key_idx)) {
			err = wl1251_acx_default_key(wl, key_idx);
			if (err < 0)
				return err;
		}
	}

	err = wl1251_tx_path_status(wl);
	if (err < 0)
		return err;

	err = wl1251_tx_fill_hdr(wl, skb, info);
	if (err < 0)
		return err;

	err = wl1251_tx_send_packet(wl, skb, info);
	if (err < 0)
		return err;

	wl1251_tx_trigger(wl);

	return 0;
}
/*
 * wl1251_tx_work - deferred TX worker
 *
 * Drains wl->tx_queue under wl->mutex, waking the chip out of ELP on
 * the first frame and putting it back to sleep when done.  Frames that
 * hit -EBUSY (TX path full) are requeued at the head for a later run;
 * other failures drop the frame.
 *
 * Fix: if wl1251_ps_elp_wakeup() fails, the skb that was already
 * dequeued used to be abandoned (neither freed nor requeued).  It is
 * now pushed back onto the queue so it is retried instead of leaked.
 */
void wl1251_tx_work(struct work_struct *work)
{
	struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
	struct sk_buff *skb;
	bool woken_up = false;
	int ret;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1251_STATE_OFF))
		goto out;

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		if (!woken_up) {
			ret = wl1251_ps_elp_wakeup(wl);
			if (ret < 0) {
				/* Don't lose the frame: retry later. */
				skb_queue_head(&wl->tx_queue, skb);
				goto out;
			}
			woken_up = true;
		}

		ret = wl1251_tx_frame(wl, skb);
		if (ret == -EBUSY) {
			/* TX path full: requeue and retry later. */
			skb_queue_head(&wl->tx_queue, skb);
			goto out;
		} else if (ret < 0) {
			/* Unrecoverable for this frame: drop it. */
			dev_kfree_skb(skb);
			goto out;
		}
	}

out:
	if (woken_up)
		wl1251_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
}
/*
 * wl1251_tx_parse_status - render a TX status byte as a flag string
 *
 * Builds a short string with one character per set error bit (e.g.
 * "rt" for retry-exceeded + timeout).  Returns a pointer to a static
 * buffer, so the result is only valid until the next call.
 */
static const char *wl1251_tx_parse_status(u8 status)
{
	/* 8 bit status field, one character per bit plus null */
	static char buf[9];
	static const struct {
		u8 bit;
		char ch;
	} tx_flags[] = {
		{ TX_DMA_ERROR,            'm' },
		{ TX_DISABLED,             'd' },
		{ TX_RETRY_EXCEEDED,       'r' },
		{ TX_TIMEOUT,              't' },
		{ TX_KEY_NOT_FOUND,        'k' },
		{ TX_ENCRYPT_FAIL,         'e' },
		{ TX_UNAVAILABLE_PRIORITY, 'p' },
	};
	size_t idx, out = 0;

	memset(buf, 0, sizeof(buf));

	for (idx = 0; idx < sizeof(tx_flags) / sizeof(tx_flags[0]); idx++)
		if (status & tx_flags[idx].bit)
			buf[out++] = tx_flags[idx].ch;

	/* bit 0 is unused apparently */

	return buf;
}
/*
 * wl1251_tx_packet_cb - hand one completed TX frame back to mac80211
 *
 * Looks up the skb by the firmware result id, fills in ACK/retry
 * status, strips the private TX descriptor (and the TKIP IV gap, if
 * one was inserted on the TX path), reports status to mac80211 and
 * frees the wl->tx_frames slot for reuse.
 */
static void wl1251_tx_packet_cb(struct wl1251 *wl,
				struct tx_result *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int hdrlen;
	u8 *frame;

	skb = wl->tx_frames[result->id];
	if (skb == NULL) {
		wl1251_error("SKB for packet %d is NULL", result->id);
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	/* Report an ACK only when one was requested and the firmware
	 * signals success. */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    (result->status == TX_SUCCESS))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.rates[0].count = result->ack_failures + 1;
	wl->stats.retry_count += result->ack_failures;

	/*
	 * We have to remove our private TX header before pushing
	 * the skb back to mac80211.
	 */
	frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		/* Undo the TKIP IV gap: move the 802.11 header back up
		 * and drop the reserved space. */
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
		skb_pull(skb, WL1251_TKIP_IV_SPACE);
	}

	wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x (%s)",
		     result->id, skb, result->ack_failures, result->rate,
		     result->status, wl1251_tx_parse_status(result->status));

	ieee80211_tx_status(wl->hw, skb);

	/* Slot is free again for wl1251_tx_id(). */
	wl->tx_frames[result->id] = NULL;
}
/* Called upon reception of a TX complete interrupt */
/*
 * wl1251_tx_complete - process the firmware TX-completion ring
 *
 * Reads the whole result array from device memory, consumes valid
 * entries in order starting at wl->next_tx_complete, reports each
 * completed frame to mac80211, reschedules the TX worker / wakes the
 * mac80211 queues when space opens up, and finally acknowledges the
 * consumed entries by writing the cleared done-flags back to device
 * memory (in one or two writes depending on ring wrap-around).
 */
void wl1251_tx_complete(struct wl1251 *wl)
{
	int i, result_index, num_complete = 0, queue_len;
	struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
	unsigned long flags;

	if (unlikely(wl->state != WL1251_STATE_ON))
		return;

	/* First we read the result */
	wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
			result, sizeof(result));

	/* Resume where the previous interrupt left off. */
	result_index = wl->next_tx_complete;

	for (i = 0; i < ARRAY_SIZE(result); i++) {
		result_ptr = &result[result_index];

		/* Both done flags set marks a valid, unconsumed entry. */
		if (result_ptr->done_1 == 1 &&
		    result_ptr->done_2 == 1) {
			wl1251_tx_packet_cb(wl, result_ptr);

			result_ptr->done_1 = 0;
			result_ptr->done_2 = 0;

			result_index = (result_index + 1) &
				(FW_TX_CMPLT_BLOCK_SIZE - 1);
			num_complete++;
		} else {
			break;
		}
	}

	queue_len = skb_queue_len(&wl->tx_queue);

	if ((num_complete > 0) && (queue_len > 0)) {
		/* firmware buffer has space, reschedule tx_work */
		wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	}

	if (wl->tx_queue_stopped &&
	    queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
		/* tx_queue has space, restart queues */
		wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
		spin_lock_irqsave(&wl->wl_lock, flags);
		ieee80211_wake_queues(wl->hw);
		wl->tx_queue_stopped = false;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	/* Every completed frame needs to be acknowledged */
	if (num_complete) {
		/*
		 * If we've wrapped, we have to clear
		 * the results in 2 steps.
		 */
		if (result_index > wl->next_tx_complete) {
			/* Only 1 write is needed */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr +
					 (wl->next_tx_complete *
					  sizeof(struct tx_result)),
					 &result[wl->next_tx_complete],
					 num_complete *
					 sizeof(struct tx_result));
		} else if (result_index < wl->next_tx_complete) {
			/* 2 writes are needed */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr +
					 (wl->next_tx_complete *
					  sizeof(struct tx_result)),
					 &result[wl->next_tx_complete],
					 (FW_TX_CMPLT_BLOCK_SIZE -
					  wl->next_tx_complete) *
					 sizeof(struct tx_result));
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr,
					 result,
					 (num_complete -
					  FW_TX_CMPLT_BLOCK_SIZE +
					  wl->next_tx_complete) *
					 sizeof(struct tx_result));
		} else {
			/* We have to write the whole array */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr,
					 result,
					 FW_TX_CMPLT_BLOCK_SIZE *
					 sizeof(struct tx_result));
		}
	}

	wl->next_tx_complete = result_index;
}
/* caller must hold wl->mutex */
/*
 * wl1251_tx_flush - drop every queued and in-flight TX frame
 *
 * Drains wl->tx_queue and clears the wl->tx_frames slots.  Frames that
 * requested a TX status report are returned via ieee80211_tx_status()
 * (which frees them); the rest are freed directly.
 *
 * Fix: frames without IEEE80211_TX_CTL_REQ_TX_STATUS were previously
 * skipped with `continue`, leaking the skb and (for in-flight frames)
 * leaving the wl->tx_frames slot occupied forever.  They are now freed
 * with dev_kfree_skb() and the slot is always cleared.
 */
void wl1251_tx_flush(struct wl1251 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* TX failure */
/*	control->flags = 0; FIXME */

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		info = IEEE80211_SKB_CB(skb);

		wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb);

		if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
			/* no status wanted: just free the frame */
			dev_kfree_skb(skb);
			continue;
		}

		ieee80211_tx_status(wl->hw, skb);
	}

	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
		if (wl->tx_frames[i] != NULL) {
			skb = wl->tx_frames[i];
			wl->tx_frames[i] = NULL;
			info = IEEE80211_SKB_CB(skb);

			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				dev_kfree_skb(skb);
				continue;
			}

			ieee80211_tx_status(wl->hw, skb);
		}
}
| gpl-2.0 |
hazard209/Charge_Kernel | drivers/video/msm/mdp_scale_tables.c | 12919 | 20631 | /* drivers/video/msm_fb/mdp_scale_tables.c
*
* Copyright (C) 2007 QUALCOMM Incorporated
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "mdp_scale_tables.h"
#include "mdp_hw.h"
struct mdp_table_entry mdp_upscale_table[] = {
{ 0x5fffc, 0x0 },
{ 0x50200, 0x7fc00000 },
{ 0x5fffc, 0xff80000d },
{ 0x50204, 0x7ec003f9 },
{ 0x5fffc, 0xfec0001c },
{ 0x50208, 0x7d4003f3 },
{ 0x5fffc, 0xfe40002b },
{ 0x5020c, 0x7b8003ed },
{ 0x5fffc, 0xfd80003c },
{ 0x50210, 0x794003e8 },
{ 0x5fffc, 0xfcc0004d },
{ 0x50214, 0x76c003e4 },
{ 0x5fffc, 0xfc40005f },
{ 0x50218, 0x73c003e0 },
{ 0x5fffc, 0xfb800071 },
{ 0x5021c, 0x708003de },
{ 0x5fffc, 0xfac00085 },
{ 0x50220, 0x6d0003db },
{ 0x5fffc, 0xfa000098 },
{ 0x50224, 0x698003d9 },
{ 0x5fffc, 0xf98000ac },
{ 0x50228, 0x654003d8 },
{ 0x5fffc, 0xf8c000c1 },
{ 0x5022c, 0x610003d7 },
{ 0x5fffc, 0xf84000d5 },
{ 0x50230, 0x5c8003d7 },
{ 0x5fffc, 0xf7c000e9 },
{ 0x50234, 0x580003d7 },
{ 0x5fffc, 0xf74000fd },
{ 0x50238, 0x534003d8 },
{ 0x5fffc, 0xf6c00112 },
{ 0x5023c, 0x4e8003d8 },
{ 0x5fffc, 0xf6800126 },
{ 0x50240, 0x494003da },
{ 0x5fffc, 0xf600013a },
{ 0x50244, 0x448003db },
{ 0x5fffc, 0xf600014d },
{ 0x50248, 0x3f4003dd },
{ 0x5fffc, 0xf5c00160 },
{ 0x5024c, 0x3a4003df },
{ 0x5fffc, 0xf5c00172 },
{ 0x50250, 0x354003e1 },
{ 0x5fffc, 0xf5c00184 },
{ 0x50254, 0x304003e3 },
{ 0x5fffc, 0xf6000195 },
{ 0x50258, 0x2b0003e6 },
{ 0x5fffc, 0xf64001a6 },
{ 0x5025c, 0x260003e8 },
{ 0x5fffc, 0xf6c001b4 },
{ 0x50260, 0x214003eb },
{ 0x5fffc, 0xf78001c2 },
{ 0x50264, 0x1c4003ee },
{ 0x5fffc, 0xf80001cf },
{ 0x50268, 0x17c003f1 },
{ 0x5fffc, 0xf90001db },
{ 0x5026c, 0x134003f3 },
{ 0x5fffc, 0xfa0001e5 },
{ 0x50270, 0xf0003f6 },
{ 0x5fffc, 0xfb4001ee },
{ 0x50274, 0xac003f9 },
{ 0x5fffc, 0xfcc001f5 },
{ 0x50278, 0x70003fb },
{ 0x5fffc, 0xfe4001fb },
{ 0x5027c, 0x34003fe },
};
static struct mdp_table_entry mdp_downscale_x_table_PT2TOPT4[] = {
{ 0x5fffc, 0x740008c },
{ 0x50280, 0x33800088 },
{ 0x5fffc, 0x800008e },
{ 0x50284, 0x33400084 },
{ 0x5fffc, 0x8400092 },
{ 0x50288, 0x33000080 },
{ 0x5fffc, 0x9000094 },
{ 0x5028c, 0x3300007b },
{ 0x5fffc, 0x9c00098 },
{ 0x50290, 0x32400077 },
{ 0x5fffc, 0xa40009b },
{ 0x50294, 0x32000073 },
{ 0x5fffc, 0xb00009d },
{ 0x50298, 0x31c0006f },
{ 0x5fffc, 0xbc000a0 },
{ 0x5029c, 0x3140006b },
{ 0x5fffc, 0xc8000a2 },
{ 0x502a0, 0x31000067 },
{ 0x5fffc, 0xd8000a5 },
{ 0x502a4, 0x30800062 },
{ 0x5fffc, 0xe4000a8 },
{ 0x502a8, 0x2fc0005f },
{ 0x5fffc, 0xec000aa },
{ 0x502ac, 0x2fc0005b },
{ 0x5fffc, 0xf8000ad },
{ 0x502b0, 0x2f400057 },
{ 0x5fffc, 0x108000b0 },
{ 0x502b4, 0x2e400054 },
{ 0x5fffc, 0x114000b2 },
{ 0x502b8, 0x2e000050 },
{ 0x5fffc, 0x124000b4 },
{ 0x502bc, 0x2d80004c },
{ 0x5fffc, 0x130000b6 },
{ 0x502c0, 0x2d000049 },
{ 0x5fffc, 0x140000b8 },
{ 0x502c4, 0x2c800045 },
{ 0x5fffc, 0x150000b9 },
{ 0x502c8, 0x2c000042 },
{ 0x5fffc, 0x15c000bd },
{ 0x502cc, 0x2b40003e },
{ 0x5fffc, 0x16c000bf },
{ 0x502d0, 0x2a80003b },
{ 0x5fffc, 0x17c000bf },
{ 0x502d4, 0x2a000039 },
{ 0x5fffc, 0x188000c2 },
{ 0x502d8, 0x29400036 },
{ 0x5fffc, 0x19c000c4 },
{ 0x502dc, 0x28800032 },
{ 0x5fffc, 0x1ac000c5 },
{ 0x502e0, 0x2800002f },
{ 0x5fffc, 0x1bc000c7 },
{ 0x502e4, 0x2740002c },
{ 0x5fffc, 0x1cc000c8 },
{ 0x502e8, 0x26c00029 },
{ 0x5fffc, 0x1dc000c9 },
{ 0x502ec, 0x26000027 },
{ 0x5fffc, 0x1ec000cc },
{ 0x502f0, 0x25000024 },
{ 0x5fffc, 0x200000cc },
{ 0x502f4, 0x24800021 },
{ 0x5fffc, 0x210000cd },
{ 0x502f8, 0x23800020 },
{ 0x5fffc, 0x220000ce },
{ 0x502fc, 0x2300001d },
};
static struct mdp_table_entry mdp_downscale_x_table_PT4TOPT6[] = {
{ 0x5fffc, 0x740008c },
{ 0x50280, 0x33800088 },
{ 0x5fffc, 0x800008e },
{ 0x50284, 0x33400084 },
{ 0x5fffc, 0x8400092 },
{ 0x50288, 0x33000080 },
{ 0x5fffc, 0x9000094 },
{ 0x5028c, 0x3300007b },
{ 0x5fffc, 0x9c00098 },
{ 0x50290, 0x32400077 },
{ 0x5fffc, 0xa40009b },
{ 0x50294, 0x32000073 },
{ 0x5fffc, 0xb00009d },
{ 0x50298, 0x31c0006f },
{ 0x5fffc, 0xbc000a0 },
{ 0x5029c, 0x3140006b },
{ 0x5fffc, 0xc8000a2 },
{ 0x502a0, 0x31000067 },
{ 0x5fffc, 0xd8000a5 },
{ 0x502a4, 0x30800062 },
{ 0x5fffc, 0xe4000a8 },
{ 0x502a8, 0x2fc0005f },
{ 0x5fffc, 0xec000aa },
{ 0x502ac, 0x2fc0005b },
{ 0x5fffc, 0xf8000ad },
{ 0x502b0, 0x2f400057 },
{ 0x5fffc, 0x108000b0 },
{ 0x502b4, 0x2e400054 },
{ 0x5fffc, 0x114000b2 },
{ 0x502b8, 0x2e000050 },
{ 0x5fffc, 0x124000b4 },
{ 0x502bc, 0x2d80004c },
{ 0x5fffc, 0x130000b6 },
{ 0x502c0, 0x2d000049 },
{ 0x5fffc, 0x140000b8 },
{ 0x502c4, 0x2c800045 },
{ 0x5fffc, 0x150000b9 },
{ 0x502c8, 0x2c000042 },
{ 0x5fffc, 0x15c000bd },
{ 0x502cc, 0x2b40003e },
{ 0x5fffc, 0x16c000bf },
{ 0x502d0, 0x2a80003b },
{ 0x5fffc, 0x17c000bf },
{ 0x502d4, 0x2a000039 },
{ 0x5fffc, 0x188000c2 },
{ 0x502d8, 0x29400036 },
{ 0x5fffc, 0x19c000c4 },
{ 0x502dc, 0x28800032 },
{ 0x5fffc, 0x1ac000c5 },
{ 0x502e0, 0x2800002f },
{ 0x5fffc, 0x1bc000c7 },
{ 0x502e4, 0x2740002c },
{ 0x5fffc, 0x1cc000c8 },
{ 0x502e8, 0x26c00029 },
{ 0x5fffc, 0x1dc000c9 },
{ 0x502ec, 0x26000027 },
{ 0x5fffc, 0x1ec000cc },
{ 0x502f0, 0x25000024 },
{ 0x5fffc, 0x200000cc },
{ 0x502f4, 0x24800021 },
{ 0x5fffc, 0x210000cd },
{ 0x502f8, 0x23800020 },
{ 0x5fffc, 0x220000ce },
{ 0x502fc, 0x2300001d },
};
static struct mdp_table_entry mdp_downscale_x_table_PT6TOPT8[] = {
{ 0x5fffc, 0xfe000070 },
{ 0x50280, 0x4bc00068 },
{ 0x5fffc, 0xfe000078 },
{ 0x50284, 0x4bc00060 },
{ 0x5fffc, 0xfe000080 },
{ 0x50288, 0x4b800059 },
{ 0x5fffc, 0xfe000089 },
{ 0x5028c, 0x4b000052 },
{ 0x5fffc, 0xfe400091 },
{ 0x50290, 0x4a80004b },
{ 0x5fffc, 0xfe40009a },
{ 0x50294, 0x4a000044 },
{ 0x5fffc, 0xfe8000a3 },
{ 0x50298, 0x4940003d },
{ 0x5fffc, 0xfec000ac },
{ 0x5029c, 0x48400037 },
{ 0x5fffc, 0xff0000b4 },
{ 0x502a0, 0x47800031 },
{ 0x5fffc, 0xff8000bd },
{ 0x502a4, 0x4640002b },
{ 0x5fffc, 0xc5 },
{ 0x502a8, 0x45000026 },
{ 0x5fffc, 0x8000ce },
{ 0x502ac, 0x43800021 },
{ 0x5fffc, 0x10000d6 },
{ 0x502b0, 0x4240001c },
{ 0x5fffc, 0x18000df },
{ 0x502b4, 0x40800018 },
{ 0x5fffc, 0x24000e6 },
{ 0x502b8, 0x3f000014 },
{ 0x5fffc, 0x30000ee },
{ 0x502bc, 0x3d400010 },
{ 0x5fffc, 0x40000f5 },
{ 0x502c0, 0x3b80000c },
{ 0x5fffc, 0x50000fc },
{ 0x502c4, 0x39800009 },
{ 0x5fffc, 0x6000102 },
{ 0x502c8, 0x37c00006 },
{ 0x5fffc, 0x7000109 },
{ 0x502cc, 0x35800004 },
{ 0x5fffc, 0x840010e },
{ 0x502d0, 0x33800002 },
{ 0x5fffc, 0x9800114 },
{ 0x502d4, 0x31400000 },
{ 0x5fffc, 0xac00119 },
{ 0x502d8, 0x2f4003fe },
{ 0x5fffc, 0xc40011e },
{ 0x502dc, 0x2d0003fc },
{ 0x5fffc, 0xdc00121 },
{ 0x502e0, 0x2b0003fb },
{ 0x5fffc, 0xf400125 },
{ 0x502e4, 0x28c003fa },
{ 0x5fffc, 0x11000128 },
{ 0x502e8, 0x268003f9 },
{ 0x5fffc, 0x12c0012a },
{ 0x502ec, 0x244003f9 },
{ 0x5fffc, 0x1480012c },
{ 0x502f0, 0x224003f8 },
{ 0x5fffc, 0x1640012e },
{ 0x502f4, 0x200003f8 },
{ 0x5fffc, 0x1800012f },
{ 0x502f8, 0x1e0003f8 },
{ 0x5fffc, 0x1a00012f },
{ 0x502fc, 0x1c0003f8 },
};
static struct mdp_table_entry mdp_downscale_x_table_PT8TO1[] = {
{ 0x5fffc, 0x0 },
{ 0x50280, 0x7fc00000 },
{ 0x5fffc, 0xff80000d },
{ 0x50284, 0x7ec003f9 },
{ 0x5fffc, 0xfec0001c },
{ 0x50288, 0x7d4003f3 },
{ 0x5fffc, 0xfe40002b },
{ 0x5028c, 0x7b8003ed },
{ 0x5fffc, 0xfd80003c },
{ 0x50290, 0x794003e8 },
{ 0x5fffc, 0xfcc0004d },
{ 0x50294, 0x76c003e4 },
{ 0x5fffc, 0xfc40005f },
{ 0x50298, 0x73c003e0 },
{ 0x5fffc, 0xfb800071 },
{ 0x5029c, 0x708003de },
{ 0x5fffc, 0xfac00085 },
{ 0x502a0, 0x6d0003db },
{ 0x5fffc, 0xfa000098 },
{ 0x502a4, 0x698003d9 },
{ 0x5fffc, 0xf98000ac },
{ 0x502a8, 0x654003d8 },
{ 0x5fffc, 0xf8c000c1 },
{ 0x502ac, 0x610003d7 },
{ 0x5fffc, 0xf84000d5 },
{ 0x502b0, 0x5c8003d7 },
{ 0x5fffc, 0xf7c000e9 },
{ 0x502b4, 0x580003d7 },
{ 0x5fffc, 0xf74000fd },
{ 0x502b8, 0x534003d8 },
{ 0x5fffc, 0xf6c00112 },
{ 0x502bc, 0x4e8003d8 },
{ 0x5fffc, 0xf6800126 },
{ 0x502c0, 0x494003da },
{ 0x5fffc, 0xf600013a },
{ 0x502c4, 0x448003db },
{ 0x5fffc, 0xf600014d },
{ 0x502c8, 0x3f4003dd },
{ 0x5fffc, 0xf5c00160 },
{ 0x502cc, 0x3a4003df },
{ 0x5fffc, 0xf5c00172 },
{ 0x502d0, 0x354003e1 },
{ 0x5fffc, 0xf5c00184 },
{ 0x502d4, 0x304003e3 },
{ 0x5fffc, 0xf6000195 },
{ 0x502d8, 0x2b0003e6 },
{ 0x5fffc, 0xf64001a6 },
{ 0x502dc, 0x260003e8 },
{ 0x5fffc, 0xf6c001b4 },
{ 0x502e0, 0x214003eb },
{ 0x5fffc, 0xf78001c2 },
{ 0x502e4, 0x1c4003ee },
{ 0x5fffc, 0xf80001cf },
{ 0x502e8, 0x17c003f1 },
{ 0x5fffc, 0xf90001db },
{ 0x502ec, 0x134003f3 },
{ 0x5fffc, 0xfa0001e5 },
{ 0x502f0, 0xf0003f6 },
{ 0x5fffc, 0xfb4001ee },
{ 0x502f4, 0xac003f9 },
{ 0x5fffc, 0xfcc001f5 },
{ 0x502f8, 0x70003fb },
{ 0x5fffc, 0xfe4001fb },
{ 0x502fc, 0x34003fe },
};
/*
 * Horizontal downscale coefficient tables, indexed by downscale-ratio
 * band.  NOTE(review): the names suggest PT2TOPT4 covers ratios of
 * roughly 0.2-0.4 of the source width, etc. -- confirm the exact band
 * boundaries against the selection logic in mdp_hw.h / the MDP driver.
 */
struct mdp_table_entry *mdp_downscale_x_table[MDP_DOWNSCALE_MAX] = {
	[MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_x_table_PT2TOPT4,
	[MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_x_table_PT4TOPT6,
	[MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_x_table_PT6TOPT8,
	[MDP_DOWNSCALE_PT8TO1] = mdp_downscale_x_table_PT8TO1,
};
static struct mdp_table_entry mdp_downscale_y_table_PT2TOPT4[] = {
{ 0x5fffc, 0x740008c },
{ 0x50300, 0x33800088 },
{ 0x5fffc, 0x800008e },
{ 0x50304, 0x33400084 },
{ 0x5fffc, 0x8400092 },
{ 0x50308, 0x33000080 },
{ 0x5fffc, 0x9000094 },
{ 0x5030c, 0x3300007b },
{ 0x5fffc, 0x9c00098 },
{ 0x50310, 0x32400077 },
{ 0x5fffc, 0xa40009b },
{ 0x50314, 0x32000073 },
{ 0x5fffc, 0xb00009d },
{ 0x50318, 0x31c0006f },
{ 0x5fffc, 0xbc000a0 },
{ 0x5031c, 0x3140006b },
{ 0x5fffc, 0xc8000a2 },
{ 0x50320, 0x31000067 },
{ 0x5fffc, 0xd8000a5 },
{ 0x50324, 0x30800062 },
{ 0x5fffc, 0xe4000a8 },
{ 0x50328, 0x2fc0005f },
{ 0x5fffc, 0xec000aa },
{ 0x5032c, 0x2fc0005b },
{ 0x5fffc, 0xf8000ad },
{ 0x50330, 0x2f400057 },
{ 0x5fffc, 0x108000b0 },
{ 0x50334, 0x2e400054 },
{ 0x5fffc, 0x114000b2 },
{ 0x50338, 0x2e000050 },
{ 0x5fffc, 0x124000b4 },
{ 0x5033c, 0x2d80004c },
{ 0x5fffc, 0x130000b6 },
{ 0x50340, 0x2d000049 },
{ 0x5fffc, 0x140000b8 },
{ 0x50344, 0x2c800045 },
{ 0x5fffc, 0x150000b9 },
{ 0x50348, 0x2c000042 },
{ 0x5fffc, 0x15c000bd },
{ 0x5034c, 0x2b40003e },
{ 0x5fffc, 0x16c000bf },
{ 0x50350, 0x2a80003b },
{ 0x5fffc, 0x17c000bf },
{ 0x50354, 0x2a000039 },
{ 0x5fffc, 0x188000c2 },
{ 0x50358, 0x29400036 },
{ 0x5fffc, 0x19c000c4 },
{ 0x5035c, 0x28800032 },
{ 0x5fffc, 0x1ac000c5 },
{ 0x50360, 0x2800002f },
{ 0x5fffc, 0x1bc000c7 },
{ 0x50364, 0x2740002c },
{ 0x5fffc, 0x1cc000c8 },
{ 0x50368, 0x26c00029 },
{ 0x5fffc, 0x1dc000c9 },
{ 0x5036c, 0x26000027 },
{ 0x5fffc, 0x1ec000cc },
{ 0x50370, 0x25000024 },
{ 0x5fffc, 0x200000cc },
{ 0x50374, 0x24800021 },
{ 0x5fffc, 0x210000cd },
{ 0x50378, 0x23800020 },
{ 0x5fffc, 0x220000ce },
{ 0x5037c, 0x2300001d },
};
static struct mdp_table_entry mdp_downscale_y_table_PT4TOPT6[] = {
{ 0x5fffc, 0x740008c },
{ 0x50300, 0x33800088 },
{ 0x5fffc, 0x800008e },
{ 0x50304, 0x33400084 },
{ 0x5fffc, 0x8400092 },
{ 0x50308, 0x33000080 },
{ 0x5fffc, 0x9000094 },
{ 0x5030c, 0x3300007b },
{ 0x5fffc, 0x9c00098 },
{ 0x50310, 0x32400077 },
{ 0x5fffc, 0xa40009b },
{ 0x50314, 0x32000073 },
{ 0x5fffc, 0xb00009d },
{ 0x50318, 0x31c0006f },
{ 0x5fffc, 0xbc000a0 },
{ 0x5031c, 0x3140006b },
{ 0x5fffc, 0xc8000a2 },
{ 0x50320, 0x31000067 },
{ 0x5fffc, 0xd8000a5 },
{ 0x50324, 0x30800062 },
{ 0x5fffc, 0xe4000a8 },
{ 0x50328, 0x2fc0005f },
{ 0x5fffc, 0xec000aa },
{ 0x5032c, 0x2fc0005b },
{ 0x5fffc, 0xf8000ad },
{ 0x50330, 0x2f400057 },
{ 0x5fffc, 0x108000b0 },
{ 0x50334, 0x2e400054 },
{ 0x5fffc, 0x114000b2 },
{ 0x50338, 0x2e000050 },
{ 0x5fffc, 0x124000b4 },
{ 0x5033c, 0x2d80004c },
{ 0x5fffc, 0x130000b6 },
{ 0x50340, 0x2d000049 },
{ 0x5fffc, 0x140000b8 },
{ 0x50344, 0x2c800045 },
{ 0x5fffc, 0x150000b9 },
{ 0x50348, 0x2c000042 },
{ 0x5fffc, 0x15c000bd },
{ 0x5034c, 0x2b40003e },
{ 0x5fffc, 0x16c000bf },
{ 0x50350, 0x2a80003b },
{ 0x5fffc, 0x17c000bf },
{ 0x50354, 0x2a000039 },
{ 0x5fffc, 0x188000c2 },
{ 0x50358, 0x29400036 },
{ 0x5fffc, 0x19c000c4 },
{ 0x5035c, 0x28800032 },
{ 0x5fffc, 0x1ac000c5 },
{ 0x50360, 0x2800002f },
{ 0x5fffc, 0x1bc000c7 },
{ 0x50364, 0x2740002c },
{ 0x5fffc, 0x1cc000c8 },
{ 0x50368, 0x26c00029 },
{ 0x5fffc, 0x1dc000c9 },
{ 0x5036c, 0x26000027 },
{ 0x5fffc, 0x1ec000cc },
{ 0x50370, 0x25000024 },
{ 0x5fffc, 0x200000cc },
{ 0x50374, 0x24800021 },
{ 0x5fffc, 0x210000cd },
{ 0x50378, 0x23800020 },
{ 0x5fffc, 0x220000ce },
{ 0x5037c, 0x2300001d },
};
static struct mdp_table_entry mdp_downscale_y_table_PT6TOPT8[] = {
{ 0x5fffc, 0xfe000070 },
{ 0x50300, 0x4bc00068 },
{ 0x5fffc, 0xfe000078 },
{ 0x50304, 0x4bc00060 },
{ 0x5fffc, 0xfe000080 },
{ 0x50308, 0x4b800059 },
{ 0x5fffc, 0xfe000089 },
{ 0x5030c, 0x4b000052 },
{ 0x5fffc, 0xfe400091 },
{ 0x50310, 0x4a80004b },
{ 0x5fffc, 0xfe40009a },
{ 0x50314, 0x4a000044 },
{ 0x5fffc, 0xfe8000a3 },
{ 0x50318, 0x4940003d },
{ 0x5fffc, 0xfec000ac },
{ 0x5031c, 0x48400037 },
{ 0x5fffc, 0xff0000b4 },
{ 0x50320, 0x47800031 },
{ 0x5fffc, 0xff8000bd },
{ 0x50324, 0x4640002b },
{ 0x5fffc, 0xc5 },
{ 0x50328, 0x45000026 },
{ 0x5fffc, 0x8000ce },
{ 0x5032c, 0x43800021 },
{ 0x5fffc, 0x10000d6 },
{ 0x50330, 0x4240001c },
{ 0x5fffc, 0x18000df },
{ 0x50334, 0x40800018 },
{ 0x5fffc, 0x24000e6 },
{ 0x50338, 0x3f000014 },
{ 0x5fffc, 0x30000ee },
{ 0x5033c, 0x3d400010 },
{ 0x5fffc, 0x40000f5 },
{ 0x50340, 0x3b80000c },
{ 0x5fffc, 0x50000fc },
{ 0x50344, 0x39800009 },
{ 0x5fffc, 0x6000102 },
{ 0x50348, 0x37c00006 },
{ 0x5fffc, 0x7000109 },
{ 0x5034c, 0x35800004 },
{ 0x5fffc, 0x840010e },
{ 0x50350, 0x33800002 },
{ 0x5fffc, 0x9800114 },
{ 0x50354, 0x31400000 },
{ 0x5fffc, 0xac00119 },
{ 0x50358, 0x2f4003fe },
{ 0x5fffc, 0xc40011e },
{ 0x5035c, 0x2d0003fc },
{ 0x5fffc, 0xdc00121 },
{ 0x50360, 0x2b0003fb },
{ 0x5fffc, 0xf400125 },
{ 0x50364, 0x28c003fa },
{ 0x5fffc, 0x11000128 },
{ 0x50368, 0x268003f9 },
{ 0x5fffc, 0x12c0012a },
{ 0x5036c, 0x244003f9 },
{ 0x5fffc, 0x1480012c },
{ 0x50370, 0x224003f8 },
{ 0x5fffc, 0x1640012e },
{ 0x50374, 0x200003f8 },
{ 0x5fffc, 0x1800012f },
{ 0x50378, 0x1e0003f8 },
{ 0x5fffc, 0x1a00012f },
{ 0x5037c, 0x1c0003f8 },
};
static struct mdp_table_entry mdp_downscale_y_table_PT8TO1[] = {
{ 0x5fffc, 0x0 },
{ 0x50300, 0x7fc00000 },
{ 0x5fffc, 0xff80000d },
{ 0x50304, 0x7ec003f9 },
{ 0x5fffc, 0xfec0001c },
{ 0x50308, 0x7d4003f3 },
{ 0x5fffc, 0xfe40002b },
{ 0x5030c, 0x7b8003ed },
{ 0x5fffc, 0xfd80003c },
{ 0x50310, 0x794003e8 },
{ 0x5fffc, 0xfcc0004d },
{ 0x50314, 0x76c003e4 },
{ 0x5fffc, 0xfc40005f },
{ 0x50318, 0x73c003e0 },
{ 0x5fffc, 0xfb800071 },
{ 0x5031c, 0x708003de },
{ 0x5fffc, 0xfac00085 },
{ 0x50320, 0x6d0003db },
{ 0x5fffc, 0xfa000098 },
{ 0x50324, 0x698003d9 },
{ 0x5fffc, 0xf98000ac },
{ 0x50328, 0x654003d8 },
{ 0x5fffc, 0xf8c000c1 },
{ 0x5032c, 0x610003d7 },
{ 0x5fffc, 0xf84000d5 },
{ 0x50330, 0x5c8003d7 },
{ 0x5fffc, 0xf7c000e9 },
{ 0x50334, 0x580003d7 },
{ 0x5fffc, 0xf74000fd },
{ 0x50338, 0x534003d8 },
{ 0x5fffc, 0xf6c00112 },
{ 0x5033c, 0x4e8003d8 },
{ 0x5fffc, 0xf6800126 },
{ 0x50340, 0x494003da },
{ 0x5fffc, 0xf600013a },
{ 0x50344, 0x448003db },
{ 0x5fffc, 0xf600014d },
{ 0x50348, 0x3f4003dd },
{ 0x5fffc, 0xf5c00160 },
{ 0x5034c, 0x3a4003df },
{ 0x5fffc, 0xf5c00172 },
{ 0x50350, 0x354003e1 },
{ 0x5fffc, 0xf5c00184 },
{ 0x50354, 0x304003e3 },
{ 0x5fffc, 0xf6000195 },
{ 0x50358, 0x2b0003e6 },
{ 0x5fffc, 0xf64001a6 },
{ 0x5035c, 0x260003e8 },
{ 0x5fffc, 0xf6c001b4 },
{ 0x50360, 0x214003eb },
{ 0x5fffc, 0xf78001c2 },
{ 0x50364, 0x1c4003ee },
{ 0x5fffc, 0xf80001cf },
{ 0x50368, 0x17c003f1 },
{ 0x5fffc, 0xf90001db },
{ 0x5036c, 0x134003f3 },
{ 0x5fffc, 0xfa0001e5 },
{ 0x50370, 0xf0003f6 },
{ 0x5fffc, 0xfb4001ee },
{ 0x50374, 0xac003f9 },
{ 0x5fffc, 0xfcc001f5 },
{ 0x50378, 0x70003fb },
{ 0x5fffc, 0xfe4001fb },
{ 0x5037c, 0x34003fe },
};
/*
 * Vertical downscale coefficient tables, indexed by downscale-ratio
 * band; the register addresses (0x503xx) differ from the horizontal
 * tables (0x502xx) but the coefficient values are identical.
 */
struct mdp_table_entry *mdp_downscale_y_table[MDP_DOWNSCALE_MAX] = {
	[MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_y_table_PT2TOPT4,
	[MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_y_table_PT4TOPT6,
	[MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_y_table_PT6TOPT8,
	[MDP_DOWNSCALE_PT8TO1] = mdp_downscale_y_table_PT8TO1,
};
struct mdp_table_entry mdp_gaussian_blur_table[] = {
/* max variance */
{ 0x5fffc, 0x20000080 },
{ 0x50280, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50284, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50288, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5028c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50290, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50294, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50298, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5029c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502a0, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502a4, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502a8, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502ac, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502b0, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502b4, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502b8, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502bc, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502c0, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502c4, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502c8, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502cc, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502d0, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502d4, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502d8, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502dc, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502e0, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502e4, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502e8, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502ec, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502f0, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502f4, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502f8, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x502fc, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50300, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50304, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50308, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5030c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50310, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50314, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50318, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5031c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50320, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50324, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50328, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5032c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50330, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50334, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50338, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5033c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50340, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50344, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50348, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5034c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50350, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50354, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50358, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5035c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50360, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50364, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50368, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5036c, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50370, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50374, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x50378, 0x20000080 },
{ 0x5fffc, 0x20000080 },
{ 0x5037c, 0x20000080 },
};
| gpl-2.0 |
TeamICS/heroc-kernel-2.6.35-ics | drivers/pnp/isapnp/compat.c | 14711 | 2196 | /*
* compat.c - A series of functions to make it easier to convert drivers that use
* the old isapnp APIs. If possible use the new APIs instead.
*
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
*/
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/string.h>
/*
 * pnp_convert_id - format a compressed EISA vendor/device pair
 * @buf:    destination, at least 8 bytes ("VVVdddd" plus NUL)
 * @vendor: 16-bit compressed EISA vendor id (byte-swapped encoding)
 * @device: 16-bit device id (nibble-swapped encoding)
 *
 * Decodes the three 5-bit vendor letters and the four device hex
 * digits into the canonical 7-character PnP id string, e.g. "PNP0c03".
 */
static void pnp_convert_id(char *buf, unsigned short vendor,
			   unsigned short device)
{
	/* Each vendor letter is a 5-bit value, 1 = 'A', scattered
	 * across the halfword in byte-swapped order. */
	char v1 = 'A' + ((vendor >> 2) & 0x3f) - 1;
	char v2 = 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1;
	char v3 = 'A' + ((vendor >> 8) & 0x1f) - 1;

	sprintf(buf, "%c%c%c%x%x%x%x", v1, v2, v3,
		(device >> 4) & 0x0f, device & 0x0f,
		(device >> 12) & 0x0f, (device >> 8) & 0x0f);
}
/*
 * pnp_find_card - legacy isapnp lookup of a PnP card by vendor/device id
 * @vendor: compressed EISA vendor id
 * @device: device id
 * @from:   card after which to resume the search, or NULL to start over
 *
 * Compatibility helper for drivers using the old isapnp API.  Walks the
 * global card list and returns the next card whose id matches, or any
 * card at all when the wildcard (ISAPNP_ANY_ID) pattern was requested.
 * Returns NULL when no further match exists.
 */
struct pnp_card *pnp_find_card(unsigned short vendor, unsigned short device,
			       struct pnp_card *from)
{
	char id[8];
	char any[8];
	struct list_head *pos;

	pnp_convert_id(id, vendor, device);
	pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);

	pos = from ? from->global_list.next : pnp_cards.next;

	for (; pos != &pnp_cards; pos = pos->next) {
		struct pnp_card *card = global_to_pnp_card(pos);

		/* A wildcard request (id == any) matches every card. */
		if (compare_pnp_id(card->id, id) || (memcmp(id, any, 7) == 0))
			return card;
	}

	return NULL;
}
/*
 * pnp_find_dev - legacy isapnp lookup of a logical device
 * @card:     card to search, or NULL to search every known device
 * @vendor:   compressed EISA vendor id
 * @function: logical device id
 * @from:     device after which to resume the search, or NULL to start
 *
 * Compatibility helper for drivers using the old isapnp API.  Returns
 * the next matching struct pnp_dev, or NULL when no match is found or
 * when @from does not belong to @card.
 */
struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor,
			     unsigned short function, struct pnp_dev *from)
{
	char id[8];
	char any[8];

	pnp_convert_id(id, vendor, function);
	pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);

	if (card == NULL) {	/* look for a logical device from all cards */
		struct list_head *list;

		list = pnp_global.next;
		if (from)
			list = from->global_list.next;

		while (list != &pnp_global) {
			struct pnp_dev *dev = global_to_pnp_dev(list);

			/* Wildcard request (id == any) matches all. */
			if (compare_pnp_id(dev->id, id) ||
			    (memcmp(id, any, 7) == 0))
				return dev;
			list = list->next;
		}
	} else {
		struct list_head *list;

		list = card->devices.next;
		if (from) {
			list = from->card_list.next;
			if (from->card != card)	/* something is wrong */
				return NULL;
		}

		while (list != &card->devices) {
			struct pnp_dev *dev = card_to_pnp_dev(list);

			if (compare_pnp_id(dev->id, id))
				return dev;
			list = list->next;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(pnp_find_card);
EXPORT_SYMBOL(pnp_find_dev);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.