repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
kcarden/android_kernel_lge_msm8916 | drivers/usb/misc/sisusbvga/sisusb_init.c | 11746 | 25368 | /*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
* Display mode initializing code
*
* Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria
*
* If distributed as part of the Linux kernel, this code is licensed under the
* terms of the GPL v2.
*
* Otherwise, the following license terms apply:
*
* * Redistribution and use in source and binary forms, with or without
* * modification, are permitted provided that the following conditions
* * are met:
* * 1) Redistributions of source code must retain the above copyright
* * notice, this list of conditions and the following disclaimer.
* * 2) Redistributions in binary form must reproduce the above copyright
* * notice, this list of conditions and the following disclaimer in the
* * documentation and/or other materials provided with the distribution.
* * 3) The name of the author may not be used to endorse or promote products
* * derived from this software without specific prior written permission.
* *
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Author: Thomas Winischhofer <thomas@winischhofer.net>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include "sisusb.h"
#ifdef INCL_SISUSB_CON
#include "sisusb_init.h"
/*********************************************/
/* POINTER INITIALIZATION */
/*********************************************/
/* Point SiS_Pr's lookup-table pointers at the static SiSUSB_* mode,
 * CRTC and clock tables compiled into this driver. Must run before
 * any table lookup (SiS_SearchModeID, SiS_GetRatePtr, ...). */
static void SiSUSB_InitPtr(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_ModeResInfo = SiSUSB_ModeResInfo;
	SiS_Pr->SiS_StandTable = SiSUSB_StandTable;
	SiS_Pr->SiS_SModeIDTable = SiSUSB_SModeIDTable;
	SiS_Pr->SiS_EModeIDTable = SiSUSB_EModeIDTable;
	SiS_Pr->SiS_RefIndex = SiSUSB_RefIndex;
	SiS_Pr->SiS_CRT1Table = SiSUSB_CRT1Table;
	SiS_Pr->SiS_VCLKData = SiSUSB_VCLKData;
}
/*********************************************/
/* HELPER: SetReg, GetReg */
/*********************************************/
/* Write 'data' to indexed register 'index' of the register pair at
 * 'port' (performed as a USB request, not real port I/O). */
static void
SiS_SetReg(struct SiS_Private *SiS_Pr, unsigned long port,
	   unsigned short index, unsigned short data)
{
	sisusb_setidxreg(SiS_Pr->sisusb, port, index, data);
}
/* Write a single byte directly to 'port' (non-indexed register). */
static void
SiS_SetRegByte(struct SiS_Private *SiS_Pr, unsigned long port,
	       unsigned short data)
{
	sisusb_setreg(SiS_Pr->sisusb, port, data);
}
/* Read indexed register 'index' of the register pair at 'port' (over
 * USB). The return value of sisusb_getidxreg() is ignored here, so
 * initialize 'data' — otherwise a failed USB transfer would return an
 * uninitialized stack byte to the caller. */
static unsigned char
SiS_GetReg(struct SiS_Private *SiS_Pr, unsigned long port, unsigned short index)
{
	u8 data = 0;

	sisusb_getidxreg(SiS_Pr->sisusb, port, index, &data);
	return data;
}
/* Read a single byte directly from 'port' (over USB). Initialize
 * 'data' so a failed transfer (sisusb_getreg's return value is
 * ignored) yields 0 rather than uninitialized stack garbage. */
static unsigned char
SiS_GetRegByte(struct SiS_Private *SiS_Pr, unsigned long port)
{
	u8 data = 0;

	sisusb_getreg(SiS_Pr->sisusb, port, &data);
	return data;
}
/* Read-modify-write: reg[index] = (reg[index] & DataAND) | DataOR. */
static void
SiS_SetRegANDOR(struct SiS_Private *SiS_Pr, unsigned long port,
		unsigned short index, unsigned short DataAND,
		unsigned short DataOR)
{
	sisusb_setidxregandor(SiS_Pr->sisusb, port, index, DataAND, DataOR);
}
/* Read-modify-write: clear bits — reg[index] &= DataAND. */
static void
SiS_SetRegAND(struct SiS_Private *SiS_Pr, unsigned long port,
	      unsigned short index, unsigned short DataAND)
{
	sisusb_setidxregand(SiS_Pr->sisusb, port, index, DataAND);
}
/* Read-modify-write: set bits — reg[index] |= DataOR. */
static void
SiS_SetRegOR(struct SiS_Private *SiS_Pr, unsigned long port,
	     unsigned short index, unsigned short DataOR)
{
	sisusb_setidxregor(SiS_Pr->sisusb, port, index, DataOR);
}
/*********************************************/
/* HELPER: DisplayOn, DisplayOff */
/*********************************************/
/* Turn the display on by clearing the screen-off bit (0x20) in SR01. */
static void SiS_DisplayOn(struct SiS_Private *SiS_Pr)
{
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0xDF);
}
/*********************************************/
/* HELPER: Init Port Addresses */
/*********************************************/
/* Compute the device "port" addresses used by the register helpers.
 * Each entry mirrors a legacy VGA I/O port (0x3c0..0x3da) as an
 * offset from BaseAddr: port 0x3cX maps to BaseAddr + 0x1X, and
 * 0x3dX maps to BaseAddr + 0x2X. */
static void SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr)
{
	SiS_Pr->SiS_P3c4 = BaseAddr + 0x14;	/* SR index/data */
	SiS_Pr->SiS_P3d4 = BaseAddr + 0x24;	/* CR index/data */
	SiS_Pr->SiS_P3c0 = BaseAddr + 0x10;	/* attribute controller */
	SiS_Pr->SiS_P3ce = BaseAddr + 0x1e;	/* graphics controller */
	SiS_Pr->SiS_P3c2 = BaseAddr + 0x12;	/* misc output (write) */
	SiS_Pr->SiS_P3ca = BaseAddr + 0x1a;
	SiS_Pr->SiS_P3c6 = BaseAddr + 0x16;	/* DAC pixel mask */
	SiS_Pr->SiS_P3c7 = BaseAddr + 0x17;	/* DAC read index */
	SiS_Pr->SiS_P3c8 = BaseAddr + 0x18;	/* DAC write index */
	SiS_Pr->SiS_P3c9 = BaseAddr + 0x19;	/* DAC data */
	SiS_Pr->SiS_P3cb = BaseAddr + 0x1b;	/* segment select */
	SiS_Pr->SiS_P3cc = BaseAddr + 0x1c;	/* misc output (read) */
	SiS_Pr->SiS_P3cd = BaseAddr + 0x1d;	/* segment select */
	SiS_Pr->SiS_P3da = BaseAddr + 0x2a;	/* input status 1 */
	SiS_Pr->SiS_Part1Port = BaseAddr + SIS_CRT2_PORT_04;
}
/*********************************************/
/* HELPER: GetSysFlags */
/*********************************************/
/* Record the CRT register index used for CRT1 gating (consumed by
 * SiS_HandleCRT1); on this chip it is always CR63. */
static void SiS_GetSysFlags(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_MyCR63 = 0x63;
}
/*********************************************/
/* HELPER: Init PCI & Engines */
/*********************************************/
/* One-time engine setup after unlock: program SR20 (0xa1 —
 * presumably enables linear/MMIO addressing, mirroring the regular
 * SiS driver init; TODO confirm against chip docs) and switch on the
 * drawing engines via SR1E. */
static void SiSInitPCIetc(struct SiS_Private *SiS_Pr)
{
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x20, 0xa1);
	/* - Enable 2D (0x40)
	 * - Enable 3D (0x02)
	 * - Enable 3D vertex command fetch (0x10)
	 * - Enable 3D command parser (0x08)
	 * - Enable 3D G/L transformation engine (0x80)
	 */
	SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1E, 0xDA);
}
/*********************************************/
/* HELPER: SET SEGMENT REGISTERS */
/*********************************************/
/* Program the low nibbles of the segment-select registers 3cb/3cd
 * from the 8-bit 'value': the value's upper nibble goes into 3cb,
 * its lower nibble into 3cd; the high nibble of each register is
 * preserved. */
static void SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short cb, cd;

	value &= 0x00ff;
	cb = (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0xf0) | (value >> 4);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, cb);
	cd = (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0xf0) | (value & 0x0f);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, cd);
}
/* Program the high nibbles of the segment-select registers 3cb/3cd
 * from the 8-bit 'value'; the low nibble of each register is
 * preserved. Counterpart of SiS_SetSegRegLower(). */
static void SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short cb, cd;

	value &= 0x00ff;
	cb = (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0x0f) | (value & 0xf0);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb, cb);
	cd = (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0x0f) | (value << 4);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd, cd);
}
/* Write 'value' into both halves of the segment-select registers. */
static void SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value)
{
	SiS_SetSegRegLower(SiS_Pr, value);
	SiS_SetSegRegUpper(SiS_Pr, value);
}
/* Zero the segment-select registers (3cb/3cd). */
static void SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentReg(SiS_Pr, 0);
}
/* Program the segment "overflow" bits: the 3-bit bank from bits 8-10
 * of 'value' is duplicated into both nibbles of SR1d, then the low
 * byte is written to the regular segment-select registers. */
static void
SiS_SetSegmentRegOver(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short bank = (value >> 8) & 0x07;

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1d, bank | (bank << 4));
	SiS_SetSegmentReg(SiS_Pr, value);
}
/* Zero SR1d and the segment-select registers. */
static void SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentRegOver(SiS_Pr, 0);
}
/* Clear all segment addressing state before a mode set. */
static void SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
{
	SiS_ResetSegmentReg(SiS_Pr);
	SiS_ResetSegmentRegOver(SiS_Pr);
}
/*********************************************/
/* HELPER: SearchModeID */
/*********************************************/
/* Look up *ModeNo in the mode tables. On success store the table
 * index in *ModeIdIndex and return 1; return 0 for unsupported modes.
 * Of the standard VGA modes (<= 0x13) only text mode 0x03 is known. */
static int
SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
		 unsigned short *ModeIdIndex)
{
	unsigned short id;

	if (*ModeNo <= 0x13) {
		if (*ModeNo != 0x03)
			return 0;
		*ModeIdIndex = 0;
		return 1;
	}

	*ModeIdIndex = 0;
	for (;;) {
		id = SiS_Pr->SiS_EModeIDTable[*ModeIdIndex].Ext_ModeID;
		if (id == *ModeNo)
			return 1;
		if (id == 0xFF)		/* end-of-table sentinel */
			return 0;
		(*ModeIdIndex)++;
	}
}
/*********************************************/
/* HELPER: ENABLE CRT1 */
/*********************************************/
static void SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
{
	/* Enable CRT1 gating: clear bit 6 of CR63 (index cached in
	 * SiS_MyCR63 by SiS_GetSysFlags). */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, SiS_Pr->SiS_MyCR63, 0xbf);
}
/*********************************************/
/* HELPER: GetColorDepth */
/*********************************************/
/* Return the color-depth factor for a mode, taken from a small table
 * indexed by the mode-type field of the mode flags. Types below EGA
 * are clamped to the first entry. */
static unsigned short
SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		  unsigned short ModeIdIndex)
{
	static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
	unsigned short modeflag;
	short index;

	/* Standard modes (<= 0x13) use the St table, extended the Ext one */
	modeflag = (ModeNo <= 0x13) ?
		SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag :
		SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	index = (modeflag & ModeTypeMask) - ModeEGA;
	return ColorDepth[index < 0 ? 0 : index];
}
/*********************************************/
/* HELPER: GetOffset */
/*********************************************/
/* Compute the CRT1 display offset (pitch) for the given mode and
 * refresh-rate table entry. */
static unsigned short
SiS_GetOffset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	      unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	unsigned short xres = SiS_Pr->SiS_RefIndex[rrti].XRes;
	unsigned short depth = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex);
	unsigned short units = xres >> 4;	/* 16-pixel groups */

	if (infoflag & InterlaceMode)
		units <<= 1;	/* interlace doubles the offset */
	units *= depth;
	if (xres & 0x0f)	/* partial trailing group: half a unit */
		units += depth >> 1;
	return units;
}
/*********************************************/
/* SEQ */
/*********************************************/
/* Program the VGA sequencer from the standard table: SR00 (reset),
 * SR01 with the screen-off bit (0x20) forced on during the mode set,
 * then SR02..SR04 (table entries SR[1..3]). */
static void
SiS_SetSeqRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char SRdata;
	int i;

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x00, 0x03);
	/* SR[0] holds SR01; keep the display blanked while programming */
	SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, SRdata);
	for (i = 2; i <= 4; i++) {
		SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i - 1];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, SRdata);
	}
}
/*********************************************/
/* MISC */
/*********************************************/
/* Write the Miscellaneous Output register (3c2) from the standard table. */
static void
SiS_SetMiscRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char Miscdata = SiS_Pr->SiS_StandTable[StandTableIndex].MISC;

	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, Miscdata);
}
/*********************************************/
/* CRTC */
/*********************************************/
/* Program CRTC registers CR00..CR18 from the standard table. CR11
 * bit 7 is cleared first to unlock the write-protected CR00-CR07. */
static void
SiS_SetCRTCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char CRTCdata;
	unsigned short i;

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);	/* unlock CR0-7 */
	for (i = 0; i <= 0x18; i++) {
		CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, i, CRTCdata);
	}
}
/*********************************************/
/* ATT */
/*********************************************/
/* Program the attribute controller (3c0) from the standard table.
 * Each 3DA read resets the ATC index/data flip-flop, so every write
 * pair below is preceded by one. */
static void
SiS_SetATTRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char ARdata;
	unsigned short i;

	for (i = 0; i <= 0x13; i++) {
		ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i];
		SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);	/* reset flip-flop */
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, i);	/* index */
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, ARdata);	/* data */
	}
	/* AR14 (color select) = 0 */
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x14);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x00);
	/* index 0x20: set the PAS bit to re-enable video output */
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x20);
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
}
/*********************************************/
/* GRC */
/*********************************************/
/* Program graphics controller registers GR00..GR08 from the standard
 * table; for modes above VGA, clear the 256-color bit in GR05. */
static void
SiS_SetGRCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char GRdata;
	unsigned short i;

	for (i = 0; i <= 0x08; i++) {
		GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3ce, i, GRdata);
	}
	if (SiS_Pr->SiS_ModeType > ModeVGA) {
		/* 256 color disable */
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3ce, 0x05, 0xBF);
	}
}
/*********************************************/
/* CLEAR EXTENDED REGISTERS */
/*********************************************/
/* Zero the extended sequencer registers SR0A..SR0E and clear SR37
 * bit 0. 'ModeNo' is unused here; kept for interface symmetry with
 * the other mode-set helpers. */
static void SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	int i;

	for (i = 0x0A; i <= 0x0E; i++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, 0x00);
	}
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x37, 0xFE);
}
/*********************************************/
/* Get rate index */
/*********************************************/
/* Find the refresh-rate table index for 'ModeNo'. Returns 0xFFFF for
 * standard modes (<= 0x13), which have no rate entry. The preferred
 * rate index is read from scratch register CR33 (low nibble); the
 * loop walks the consecutive RefIndex entries for this mode and
 * stops early if the requested rate's mode type is below the one
 * being set. */
static unsigned short
SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	       unsigned short ModeIdIndex)
{
	unsigned short rrti, i, index, temp;

	if (ModeNo <= 0x13)
		return 0xFFFF;

	index = SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x33) & 0x0F;
	if (index > 0)
		index--;

	rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;
	ModeNo = SiS_Pr->SiS_RefIndex[rrti].ModeID;

	i = 0;
	do {
		if (SiS_Pr->SiS_RefIndex[rrti + i].ModeID != ModeNo)
			break;
		temp =
		    SiS_Pr->SiS_RefIndex[rrti + i].Ext_InfoFlag & ModeTypeMask;
		if (temp < SiS_Pr->SiS_ModeType)
			break;
		i++;
		index--;
	} while (index != 0xFFFF);
	/* NOTE(review): if the very first entry breaks the loop, i wraps
	 * to 0xFFFF here; presumably the tables guarantee at least one
	 * matching entry per mode — TODO confirm. */
	i--;

	return (rrti + i);
}
/*********************************************/
/* SYNC */
/*********************************************/
/* Program the sync-polarity bits of the Misc Output register (3c2)
 * from the refresh-rate table entry; the low bits are fixed at 0x2f. */
static void SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti)
{
	unsigned short pol;

	pol = (SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag >> 8) & 0xC0;
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, pol | 0x2f);
}
/*********************************************/
/* CRTC/2 */
/*********************************************/
/* Program the extended CRT1 timing set from SiS_CRT1Table. The
 * 17-byte CR[] array is scattered across register groups:
 *   CR[0..7]   -> CR00..CR07
 *   CR[8..10]  -> CR10..CR12
 *   CR[11..12] -> CR15..CR16
 *   CR[13..15] -> SR0A..SR0C
 *   CR[16]     -> overflow bits (SR0E high bits, CR09 bit 5)
 */
static void
SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned char index;
	unsigned short temp, i, j, modeflag;

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);	/* unlock CR0-7 */

	modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRT1CRTC;

	for (i = 0, j = 0; i <= 7; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x10; i <= 10; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x15; i <= 12; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	for (j = 0x0A; i <= 15; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}

	/* high bits of CR[16] go to SR0E */
	temp = SiS_Pr->SiS_CRT1Table[index].CR[16] & 0xE0;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, temp);

	/* CR[16] bit 0 -> CR09 bit 5; bit 7 of CR09 = double scan */
	temp = ((SiS_Pr->SiS_CRT1Table[index].CR[16]) & 0x01) << 5;
	if (modeflag & DoubleScanMode)
		temp |= 0x80;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x09, 0x5F, temp);

	if (SiS_Pr->SiS_ModeType > ModeVGA)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x14, 0x4F);
}
/*********************************************/
/* OFFSET & PITCH */
/*********************************************/
/* (partly overruled by SetPitch() in XF86) */
/*********************************************/
/* Program the display pitch: low byte in CR13, high bits in the low
 * nibble of SR0E, then derive a threshold value for SR10 from the
 * (possibly interlace-halved) offset. */
static void
SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		  unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short du = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
	unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	unsigned short temp;

	temp = (du >> 8) & 0x0f;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, 0xF0, temp);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x13, (du & 0xFF));

	if (infoflag & InterlaceMode)
		du >>= 1;

	/* round (du * 32) up to the next multiple of 256, plus one */
	du <<= 5;
	temp = (du >> 8) & 0xff;
	if (du & 0xff)
		temp++;
	temp++;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x10, temp);
}
/*********************************************/
/* VCLK */
/*********************************************/
/* Program the CRT1 pixel clock PLL: SR2B/SR2C from the VCLK data
 * table, SR2D = 0x01 (presumably latches the new values — TODO
 * confirm), after clearing the clock-select bits in SR31. */
static void
SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short rrti)
{
	unsigned short index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
	unsigned short clka = SiS_Pr->SiS_VCLKData[index].SR2B;
	unsigned short clkb = SiS_Pr->SiS_VCLKData[index].SR2C;

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xCF);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2B, clka);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2C, clkb);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2D, 0x01);
}
/*********************************************/
/* FIFO */
/*********************************************/
/* Set up the CRT1 FIFO thresholds (315-series style). Defaults are
 * programmed first; for extended modes that are not both double-scan
 * and half-DCLK, auto-threshold is re-enabled with SR08 = 0x34. */
static void
SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		    unsigned short mi)
{
	unsigned short modeflag = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	/* disable auto-threshold */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0xFE);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0xAE);
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x09, 0xF0);

	if (ModeNo <= 0x13)
		return;

	if ((!(modeflag & DoubleScanMode)) || (!(modeflag & HalfDCLK))) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0x34);
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0x01);
	}
}
/*********************************************/
/* MODE REGISTERS */
/*********************************************/
/* Adjust clock-dependent state: SR32 sampling bits and SR1F for high
 * pixel clocks (>= 166 MHz), and the DAC speed field in SR07 graded
 * by the VCLK frequency. */
static void
SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		 unsigned short rrti)
{
	unsigned short data = 0, VCLK = 0, index = 0;

	if (ModeNo > 0x13) {
		index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
		VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK;
	}

	if (VCLK >= 166)
		data |= 0x0c;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x32, 0xf3, data);

	if (VCLK >= 166)
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1f, 0xe7);

	/* DAC speed: 0 = fastest setting, 3 = slowest */
	data = 0x03;
	if (VCLK >= 260)
		data = 0x00;
	else if (VCLK >= 160)
		data = 0x01;
	else if (VCLK >= 135)
		data = 0x02;

	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x07, 0xF8, data);
}
/* Program the miscellaneous CRT1 mode registers: DPMS off, color
 * depth / interlace bits in SR06, interlace offset in CR19/CR1A,
 * half-DCLK and line-compare handling, mode-type bits in SR21, the
 * VCLK-dependent state, and CR52. */
static void
SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		    unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short data, infoflag = 0, modeflag;

	if (ModeNo <= 0x13)
		modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
	else {
		modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	}

	/* Disable DPMS */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1F, 0x3F);

	/* SR06: graphics-mode flag (0x02), color depth, interlace (0x20) */
	data = 0;
	if (ModeNo > 0x13) {
		if (SiS_Pr->SiS_ModeType > ModeEGA) {
			data |= 0x02;
			data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2);
		}
		if (infoflag & InterlaceMode)
			data |= 0x20;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x06, 0xC0, data);

	data = 0;
	if (infoflag & InterlaceMode) {
		/* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */
		/* reconstruct Hsync start / Htotal from CR04/CR00 plus
		 * the extended overflow bits in SR0B */
		unsigned short hrs =
		    (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x04) |
		     ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0xc0) << 2))
		    - 3;
		unsigned short hto =
		    (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x00) |
		     ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0x03) << 8))
		    + 5;
		data = hrs - (hto >> 1) + 3;
	}
	/* interlace offset: low byte in CR19, high bits in CR1A */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x19, (data & 0xFF));
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x1a, 0xFC, (data >> 8));

	if (modeflag & HalfDCLK)
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0x08);

	data = 0;
	if (modeflag & LineCompareOff)
		data = 0x08;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0xB7, data);

	if ((SiS_Pr->SiS_ModeType == ModeEGA) && (ModeNo > 0x13))
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0x40);

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xfb);

	/* SR21 bits 5-7 depend on the mode type: text 0x60, EGA 0x00,
	 * everything else 0xA0 */
	data = 0x60;
	if (SiS_Pr->SiS_ModeType != ModeText) {
		data ^= 0x60;
		if (SiS_Pr->SiS_ModeType != ModeEGA)
			data ^= 0xA0;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x21, 0x1F, data);

	SiS_SetVCLKState(SiS_Pr, ModeNo, rrti);

	if (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x31) & 0x40)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x2c);
	else
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x6c);
}
/*********************************************/
/* LOAD DAC */
/*********************************************/
/* Write one three-byte triplet to the DAC data port. 'dl' selects a
 * rotation of (dh, ah, al) so the caller can permute which channel
 * is emitted first; 'shiftflag' left-shifts each byte before the
 * write. */
static void
SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData,
	     unsigned short shiftflag, unsigned short dl, unsigned short ah,
	     unsigned short al, unsigned short dh)
{
	unsigned short out[3];

	if (dl == 0) {
		out[0] = dh;
		out[1] = ah;
		out[2] = al;
	} else if (dl == 1) {
		out[0] = ah;
		out[1] = al;
		out[2] = dh;
	} else {
		out[0] = al;
		out[1] = dh;
		out[2] = ah;
	}
	SiS_SetRegByte(SiS_Pr, DACData, (out[0] << shiftflag));
	SiS_SetRegByte(SiS_Pr, DACData, (out[1] << shiftflag));
	SiS_SetRegByte(SiS_Pr, DACData, (out[2] << shiftflag));
}
/* Load the DAC palette appropriate for the mode's DAC-info flags:
 * the 16-entry MDA/CGA/EGA tables, or the 256-entry VGA ramp (the
 * first 16 entries come packed two-bits-per-channel, the rest are
 * expanded from a 77-byte seed via SiS_WriteDAC permutations). */
static void
SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	    unsigned short mi)
{
	unsigned short data, data2, time, i, j, k, m, n, o;
	unsigned short si, di, bx, sf;
	unsigned long DACAddr, DACData;
	const unsigned char *table = NULL;

	/* NOTE(review): '<' (not '<=') — only mode 0x03 reaches this
	 * among standard modes (see SiS_SearchModeID), so the St table
	 * is still the right one here. */
	if (ModeNo < 0x13)
		data = SiS_Pr->SiS_SModeIDTable[mi].St_ModeFlag;
	else
		data = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	data &= DACInfoFlag;

	j = time = 64;
	if (data == 0x00)
		table = SiS_MDA_DAC;
	else if (data == 0x08)
		table = SiS_CGA_DAC;
	else if (data == 0x10)
		table = SiS_EGA_DAC;
	else {
		j = 16;
		time = 256;
		table = SiS_VGA_DAC;
	}

	DACAddr = SiS_Pr->SiS_P3c8;	/* DAC write index */
	DACData = SiS_Pr->SiS_P3c9;	/* DAC data port */
	sf = 0;
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);	/* pixel mask */

	SiS_SetRegByte(SiS_Pr, DACAddr, 0x00);

	/* first block: each table byte packs R/G/B as 2-bit fields,
	 * expanded to 0x00/0x15/0x2A/0x3F levels */
	for (i = 0; i < j; i++) {
		data = table[i];
		for (k = 0; k < 3; k++) {
			data2 = 0;
			if (data & 0x01)
				data2 += 0x2A;
			if (data & 0x02)
				data2 += 0x15;
			SiS_SetRegByte(SiS_Pr, DACData, (data2 << sf));
			data >>= 2;
		}
	}

	if (time == 256) {
		/* entries 16-31: grayscale, same value on all channels */
		for (i = 16; i < 32; i++) {
			data = table[i] << sf;
			for (k = 0; k < 3; k++)
				SiS_SetRegByte(SiS_Pr, DACData, data);
		}
		/* entries 32-255: 9 hue groups of 24, generated by
		 * permuting three seed bytes per group */
		si = 32;
		for (m = 0; m < 9; m++) {
			di = si;
			bx = si + 4;
			for (n = 0; n < 3; n++) {
				for (o = 0; o < 5; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[bx],
						     table[si]);
					si++;
				}
				si -= 2;
				for (o = 0; o < 3; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[si],
						     table[bx]);
					si--;
				}
			}
			si += 5;
		}
	}
}
/*********************************************/
/* SET CRT1 REGISTER GROUP */
/*********************************************/
/* Run the full CRT1 mode-set sequence: standard VGA register groups
 * first (seq, misc, CRTC, attribute, graphics), then the extended
 * timing/offset/clock registers for the chosen refresh rate, FIFO
 * and mode registers, palette load, and finally display on. */
static void
SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		 unsigned short ModeIdIndex)
{
	unsigned short StandTableIndex, rrti;

	SiS_Pr->SiS_CRT1Mode = ModeNo;

	/* standard-table entry 0 = text mode, 1 = extended graphics */
	if (ModeNo <= 0x13)
		StandTableIndex = 0;
	else
		StandTableIndex = 1;

	SiS_ResetSegmentRegisters(SiS_Pr);
	SiS_SetSeqRegs(SiS_Pr, StandTableIndex);
	SiS_SetMiscRegs(SiS_Pr, StandTableIndex);
	SiS_SetCRTCRegs(SiS_Pr, StandTableIndex);
	SiS_SetATTRegs(SiS_Pr, StandTableIndex);
	SiS_SetGRCRegs(SiS_Pr, StandTableIndex);
	SiS_ClearExt1Regs(SiS_Pr, ModeNo);

	rrti = SiS_GetRatePtr(SiS_Pr, ModeNo, ModeIdIndex);

	if (rrti != 0xFFFF) {
		SiS_SetCRT1Sync(SiS_Pr, rrti);
		SiS_SetCRT1CRTC(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1Offset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1VCLK(SiS_Pr, ModeNo, rrti);
	}

	SiS_SetCRT1FIFO_310(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, rrti);

	SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_DisplayOn(SiS_Pr);
}
/*********************************************/
/* SiSSetMode() */
/*********************************************/
/* Set display mode 'ModeNo' on the dongle. Returns 1 on success,
 * 0 if the mode is not in the tables. */
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	unsigned short ModeIdIndex;
	unsigned long BaseAddr = SiS_Pr->IOAddress;

	SiSUSB_InitPtr(SiS_Pr);
	SiSUSBRegInit(SiS_Pr, BaseAddr);
	SiS_GetSysFlags(SiS_Pr);

	if (!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex)))
		return 0;

	/* SR05 = 0x86: the standard SiS extended-register unlock */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x05, 0x86);

	SiSInitPCIetc(SiS_Pr);

	ModeNo &= 0x7f;		/* strip the "don't clear memory" bit */

	SiS_Pr->SiS_ModeType =
	    SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag & ModeTypeMask;

	SiS_Pr->SiS_SetFlag = LowModeTests;

	/* Set mode on CRT1 */
	SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_HandleCRT1(SiS_Pr);

	SiS_DisplayOn(SiS_Pr);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);

	/* Store mode number */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x34, ModeNo);

	return 1;
}
/* Translate a VESA mode number into the internal mode number via the
 * extended mode table and set it. Returns 0 if the VESA mode is
 * unknown, otherwise SiSUSBSetMode()'s result. */
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
{
	unsigned short ModeNo = 0;
	int i;

	SiSUSB_InitPtr(SiS_Pr);

	if (VModeNo == 0x03) {
		ModeNo = 0x03;	/* standard text mode maps 1:1 */
	} else {
		for (i = 0;; i++) {
			if (SiS_Pr->SiS_EModeIDTable[i].Ext_VESAID == VModeNo) {
				ModeNo = SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID;
				break;
			}
			if (SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID == 0xff)
				break;	/* end-of-table sentinel */
		}
	}

	if (!ModeNo)
		return 0;

	return SiSUSBSetMode(SiS_Pr, ModeNo);
}
#endif /* INCL_SISUSB_CON */
| gpl-2.0 |
sonndinh/litmus-rt | drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 227 | 68805 | /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*
* Author: Zach Brown <zab@zabbo.net>
* Author: Peter J. Braam <braam@clusterfs.com>
* Author: Phil Schwan <phil@clusterfs.com>
* Author: Eric Barton <eric@bartonsoftware.com>
*
* This file is part of Portals, http://www.sf.net/projects/sandiaportals/
*
* Portals is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* Portals is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Portals; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "socklnd.h"
/* Allocate a tx descriptor of 'size' bytes. NOOP descriptors are a
 * fixed size and recycled through a free list under ksnd_tx_lock;
 * everything else (and a cache miss) is a fresh allocation. Returns
 * NULL on allocation failure. */
ksock_tx_t *
ksocknal_alloc_tx(int type, int size)
{
	ksock_tx_t *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
					next, ksock_tx_t, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (tx == NULL)
		LIBCFS_ALLOC(tx, size);

	if (tx == NULL)
		return NULL;

	atomic_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}
/* Allocate and initialize a NOOP tx (used to carry a zero-copy ACK
 * cookie). Returns NULL if the descriptor cannot be allocated. */
ksock_tx_t *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	ksock_tx_t *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (tx == NULL) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = NULL;
	tx->tx_kiov = NULL;
	tx->tx_nkiov = 0;
	/* single iov fragment: the message header itself */
	tx->tx_iov = tx->tx_frags.virt.iov;
	tx->tx_niov = 1;
	tx->tx_nonblk = nonblk;

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}
/* Release a tx descriptor: NOOP descriptors (no lnet message, fixed
 * size) go back on the idle free list for reuse, everything else is
 * freed outright. */
void
ksocknal_free_tx (ksock_tx_t *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (tx->tx_lnetmsg != NULL || tx->tx_desc_size != KSOCK_NOOP_TX_SIZE) {
		LIBCFS_FREE(tx, tx->tx_desc_size);
		return;
	}

	/* it's a noop tx: recycle it */
	spin_lock(&ksocknal_data.ksnd_tx_lock);
	list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
	spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
/* Send from the tx's kvec fragments. Returns the underlying send's
 * result (<= 0: nothing sent / error; > 0: bytes sent). On partial
 * progress the iov array is advanced in place so the next call
 * resumes where this one stopped. */
static int
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
	struct kvec *iov = tx->tx_iov;
	int nob;
	int rc;

	LASSERT (tx->tx_niov > 0);

	/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
	rc = ksocknal_lib_send_iov(conn, tx);

	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT (nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	do {
		LASSERT (tx->tx_niov > 0);

		if (nob < (int) iov->iov_len) {
			/* partial fragment: advance base/len in place */
			iov->iov_base = (void *)((char *)iov->iov_base + nob);
			iov->iov_len -= nob;
			return rc;
		}

		nob -= iov->iov_len;
		tx->tx_iov = ++iov;
		tx->tx_niov--;
	} while (nob != 0);

	return rc;
}
/* Send from the tx's page (kiov) fragments — only valid once all
 * kvec fragments are consumed (tx_niov == 0). Same contract as
 * ksocknal_send_iov(): partial progress advances the kiov array. */
static int
ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
	lnet_kiov_t *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT (tx->tx_niov == 0);
	LASSERT (tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx);

	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT (nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			/* partial page: bump offset, shrink length */
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return rc;
		}

		nob -= (int)kiov->kiov_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob != 0);

	return rc;
}
/* Push the whole tx out on 'conn'. Returns 0 when tx_resid reaches
 * zero, -ESHUTDOWN if the connection is closing, -EAGAIN/-ENOMEM on
 * backpressure, or another negative errno from the socket layer.
 * Also refreshes the tx deadline/keepalive state when the send
 * buffer drains (evidence of ACKed data). */
static int
ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx != 0) {
		/* fault-injection knob: artificially delay sends */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
	}

	LASSERT (tx->tx_resid != 0);

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT (conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			/* testing... */
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov != 0) {
			/* virtual fragments first ... */
			rc = ksocknal_send_iov (conn, tx);
		} else {
			/* ... then page fragments */
			rc = ksocknal_send_kiov (conn, tx);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0) /* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/* allocated send buffer bytes < computed; infer
			 * something got ACKed */
			conn->ksnc_tx_deadline =
			    cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
			conn->ksnc_tx_bufnob = bufnob;
			mb();
		}

		if (rc <= 0) { /* Didn't write anything? */

			if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub (rc, &conn->ksnc_tx_nob);
		rc = 0;

	} while (tx->tx_resid != 0);

	ksocknal_connsock_decref(conn);
	return rc;
}
/* Receive into the connection's kvec fragments. Returns the socket
 * layer's result (<= 0) or, on partial progress, -EAGAIN after
 * advancing the iov in place; a fully consumed fragment list returns
 * the byte count. Updates deadlines and the rx_started flag. */
static int
ksocknal_recv_iov (ksock_conn_t *conn)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT (conn->ksnc_rx_niov > 0);

	/* Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_iov(conn);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
	    cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb(); /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT (conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob != 0);

	return rc;
}
/* Receive into the connection's page (kiov) fragments. Same contract
 * as ksocknal_recv_iov(); a partially filled page returns -EAGAIN.
 * NOTE(review): the final return is the literal 1, not rc as in
 * ksocknal_recv_iov() — callers only test the sign, so behavior is
 * equivalent, but the asymmetry is worth knowing. */
static int
ksocknal_recv_kiov (ksock_conn_t *conn)
{
	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
	int nob;
	int rc;

	LASSERT (conn->ksnc_rx_nkiov > 0);

	/* Never touch conn->ksnc_rx_kiov or change connection
	 * status inside ksocknal_lib_recv_iov */
	rc = ksocknal_lib_recv_kiov(conn);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
	    cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb(); /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT (conn->ksnc_rx_nkiov > 0);

		if (nob < (int) kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return -EAGAIN;
		}

		nob -= kiov->kiov_len;
		conn->ksnc_rx_kiov = ++kiov;
		conn->ksnc_rx_nkiov--;
	} while (nob != 0);

	return 1;
}
static int
ksocknal_receive (ksock_conn_t *conn)
{
	/* Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * progress/completion. */
	int rc;

	if (ksocknal_data.ksnd_stall_rx != 0) {
		/* fault-injection knob: artificially delay receives */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
	}

	rc = ksocknal_connsock_addref(conn);
	if (rc != 0) {
		LASSERT (conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		/* drain the kvec fragments before the page fragments */
		if (conn->ksnc_rx_niov != 0)
			rc = ksocknal_recv_iov (conn);
		else
			rc = ksocknal_recv_kiov (conn);

		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;	/* made progress; come back later */
			} else if (rc == 0 && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */

		if (conn->ksnc_rx_nob_wanted == 0) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}
/* Finalize a completed (or failed) tx: drop its connection ref, free
 * the descriptor, and complete the LNet message (rc = -EIO unless the
 * whole payload went out without a zero-copy abort). 'ni' may be
 * NULL, in which case it is taken from the tx's connection — note it
 * must be captured before ksocknal_free_tx() releases the tx. */
void
ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
{
	lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
	int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;

	LASSERT(ni != NULL || tx->tx_conn != NULL);

	if (tx->tx_conn != NULL)
		ksocknal_conn_decref(tx->tx_conn);

	if (ni == NULL && tx->tx_conn != NULL)
		ni = tx->tx_conn->ksnc_peer->ksnp_ni;

	ksocknal_free_tx (tx);
	if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
		lnet_finalize (ni, lnetmsg, rc);
}
void
ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
{
	/* Drain 'txlist', completing every tx via ksocknal_tx_done().
	 * When 'error' is set, log each packet being discarded. */
	ksock_tx_t *tx;

	for (;;) {
		if (list_empty(txlist))
			break;

		tx = list_entry(txlist->next, ksock_tx_t, tx_list);

		if (error != 0) {
			if (tx->tx_lnetmsg != NULL) {
				CNETERR("Deleting packet type %d len %d %s->%s\n",
					le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
					le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
					libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
					libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
			} else {
				CNETERR("Deleting noop packet\n");
			}
		}

		list_del(&tx->tx_list);

		/* only the list's ref remains at this point */
		LASSERT(atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx);
	}
}
static void
ksocknal_check_zc_req(ksock_tx_t *tx)
{
	ksock_conn_t *conn = tx->tx_conn;
	ksock_peer_t *peer = conn->ksnc_peer;

	/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy. Our peer will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list. */
	LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT (tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	/* V1 protocol and non-ZC-capable conns never use zero-copy */
	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/* assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack() */
	ksocknal_tx_addref(tx);

	spin_lock(&peer->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer */
	tx->tx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

	LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);

	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;

	/* cookie counter wrapped: restart past the reserved
	 * keepalive-ping value */
	if (peer->ksnp_zc_next_cookie == 0)
		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);

	spin_unlock(&peer->ksnp_lock);
}
static void
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
{
	/* Undo ksocknal_check_zc_req(): if 'tx' is still waiting for a
	 * ZC ACK, remove it from the peer's pending list and drop the
	 * ref that list held. */
	ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
	int waiting;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer->ksnp_lock);

	/* a non-zero cookie means the tx is on ksnp_zc_req_list */
	waiting = (tx->tx_msg.ksm_zc_cookies[0] != 0);
	if (waiting) {
		tx->tx_msg.ksm_zc_cookies[0] = 0;
		list_del(&tx->tx_zc_list);
	}

	spin_unlock(&peer->ksnp_lock);

	if (waiting)
		ksocknal_tx_decref(tx);
}
static int
ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
{
	/* Push 'tx' out on 'conn'.  Returns 0 when fully sent, -EAGAIN
	 * on a partial send, -ENOMEM when the conn has been queued for
	 * retry, or another -ve error after closing the conn. */
	int rc;

	/* register for a ZC ACK if the tx qualifies and has not been
	 * checked yet */
	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit (conn, tx);

	CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (tx->tx_resid == 0) {
		/* Sent everything OK */
		LASSERT (rc == 0);
		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++;   /* exponential backoff warnings */
		/* only warn when counter is a power of two */
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p (%u allocated)\n",
			      counter, conn, atomic_read(&libcfs_kmemory));

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT (conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		/* wake the reaper unless it will wake soon enough anyway */
		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
						   SOCKNAL_ENOMEM_RETRY),
				      ksocknal_data.ksnd_reaper_waketime))
			wake_up (&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
		return rc;
	}

	/* Actual error */
	LASSERT (rc < 0);

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
				      &conn->ksnc_ipaddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
		       conn, rc,
		       libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_ipaddr,
		       conn->ksnc_port);
	}

	/* cancel any ZC ACK registration made above */
	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings (conn,
					  (conn->ksnc_closing) ? 0 : rc);

	return rc;
}
static void
ksocknal_launch_connection_locked (ksock_route_t *route)
{
	/* Hand 'route' over to a connection daemon (connd) so it can
	 * establish the missing connection type(s). */
	/* called holding write lock on ksnd_global_lock */

	LASSERT (!route->ksnr_scheduled);
	LASSERT (!route->ksnr_connecting);
	/* at least one connection type is still needed */
	LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);

	route->ksnr_scheduled = 1;	/* scheduling conn for connd */
	ksocknal_route_addref(route);	/* extra ref for connd */

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
{
	/* Launch a connection attempt for every route of 'peer' that
	 * still needs one.  Called holding write lock on
	 * ksnd_global_lock. */
	ksock_route_t *route;

	while ((route = ksocknal_find_connectable_route_locked(peer)) != NULL)
		ksocknal_launch_connection_locked(route);
}
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
{
	/* Select the best connection of 'peer' for sending 'tx':
	 * prefer a "typed" protocol match over a fallback, and among
	 * candidates of the same class prefer the one with the fewest
	 * queued bytes (optionally round-robining on ties).  Returns
	 * NULL if no connection accepts the tx. */
	struct list_head *tmp;
	ksock_conn_t *conn;
	ksock_conn_t *typed = NULL;
	ksock_conn_t *fallback = NULL;
	int tnob = 0;	/* queued bytes on 'typed' */
	int fnob = 0;	/* queued bytes on 'fallback' */

	list_for_each (tmp, &peer->ksnp_conns) {
		ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
		/* load metric: our accounted bytes plus the socket's
		 * own write queue */
		int nob = atomic_read(&c->ksnc_tx_nob) +
			c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT (!c->ksnc_closing);
		LASSERT (c->ksnc_proto != NULL &&
			 c->ksnc_proto->pro_match_tx != NULL);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (typed == NULL || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (fallback == NULL || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed != NULL) ? typed : fallback;

	if (conn != NULL)
		conn->ksnc_tx_last_post = cfs_time_current();

	return conn;
}
void
ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
{
	/* Bind 'tx' to 'conn': pack the wire message for the conn's
	 * protocol, account the tx bytes against the conn's queue
	 * total and take a conn ref on the tx's behalf. */
	conn->ksnc_proto->pro_pack(tx);

	atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
}
void
ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
{
	/* Queue 'tx' on 'conn' for transmission, piggybacking ZC ACKs
	 * where the protocol allows, and schedule the conn on its
	 * scheduler if it is ready to send. */
	ksock_sched_t *sched = conn->ksnc_scheduler;
	ksock_msg_t *msg = &tx->tx_msg;
	ksock_tx_t *ztx = NULL;
	int bufnob = 0;

	/* called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock... */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_ipaddr,
	       conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	/* Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send. Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header. */
	LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
		 lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		 (unsigned int)tx->tx_nob);
	LASSERT (tx->tx_niov >= 1);
	LASSERT (tx->tx_resid == tx->tx_nob);

	CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
		tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
		KSOCK_MSG_NOOP,
		tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	/*
	 * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
	 * but they're used inside spinlocks a lot.
	 */
	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
		conn->ksnc_tx_bufnob = 0;
		mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/* The packet is noop ZC ACK, try to piggyback the ack_cookie
		 * on a normal packet so I don't need to send it */
		LASSERT (msg->ksm_zc_cookies[1] != 0);
		LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */

	} else {
		/* It's a normal packet - can it piggback a noop zc-ack that
		 * has been queued already? */
		LASSERT (msg->ksm_zc_cookies[1] == 0);
		LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx != NULL) {
		/* a noop tx was made redundant: un-account its bytes
		 * and let the scheduler free it */
		atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready &&	/* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail (&conn->ksnc_tx_list,
			       &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up (&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
	/* Return the first route of 'peer' that both needs another
	 * connection and is allowed to (re)try now, or NULL.  Caller
	 * holds ksnd_global_lock. */
	unsigned long now = cfs_time_current();
	ksock_route_t *route;
	struct list_head *pos;

	list_for_each (pos, &peer->ksnp_routes) {
		route = list_entry (pos, ksock_route_t, ksnr_list);

		LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);

		/* a connection attempt is already in flight */
		if (route->ksnr_scheduled)
			continue;

		/* every connection type already established? */
		if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
			continue;

		/* not the first attempt and still inside the retry
		 * backoff window */
		if (route->ksnr_retry_interval != 0 &&
		    !cfs_time_aftereq(now, route->ksnr_timeout)) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       cfs_duration_sec(route->ksnr_timeout - now));
			continue;
		}

		return route;
	}

	return NULL;
}
ksock_route_t *
ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
{
	/* Return any route of 'peer' with a connection attempt already
	 * scheduled on a connd, or NULL if none. */
	struct list_head *pos;

	list_for_each (pos, &peer->ksnp_routes) {
		ksock_route_t *route =
			list_entry (pos, ksock_route_t, ksnr_list);

		LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
}
int
ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
{
	/* Route 'tx' to peer 'id': queue it on an existing connection
	 * if possible, otherwise launch connections and queue the tx
	 * on the peer until one is established.  Returns 0 on success
	 * or -ve errno (the tx is NOT consumed on failure). */
	ksock_peer_t *peer;
	ksock_conn_t *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT (tx->tx_conn == NULL);

	g_lock = &ksocknal_data.ksnd_global_lock;

	/* fast path under the read lock first; fall back to the write
	 * lock (and possibly peer creation) when that fails */
	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer = ksocknal_find_peer_locked(ni, id);
		if (peer != NULL) {
			if (ksocknal_find_connectable_route_locked(peer) == NULL) {
				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
				if (conn != NULL) {
					/* I've got no routes that need to be
					 * connecting and I do have an actual
					 * connection... */
					ksocknal_queue_tx_locked (tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer = ksocknal_find_peer_locked(ni, id);
		if (peer != NULL)
			break;

		write_unlock_bh(g_lock);

		if ((id.pid & LNET_PID_USERFLAG) != 0) {
			CERROR("Refusing to create a connection to userspace process %s\n",
			       libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		/* peer was added last pass but has vanished again: give up */
		if (retry) {
			CERROR("Can't find peer %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc != 0) {
			CERROR("Can't add peer %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	/* holding write lock with a known peer from here on */
	ksocknal_launch_all_connections_locked(peer);

	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
	if (conn != NULL) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked (tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked (peer) != NULL) {
		/* the message is going to be pinned to the peer */
		tx->tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

		/* Queue the message until a connection is established */
		list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	return -EHOSTUNREACH;
}
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
	/* LND send entry point: build a tx descriptor around the LNet
	 * message payload and hand it to ksocknal_launch_packet().
	 * Returns 0 on success, -ENOMEM or -EIO on failure. */
	int mpflag = 1;
	int type = lntmsg->msg_type;
	lnet_process_id_t target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	ksock_tx_t *tx;
	int desc_size;
	int rc;

	/* NB 'private' is different depending on what we're sending.
	 * Just ignore it... */
	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT (payload_nob == 0 || payload_niov > 0);
	LASSERT (payload_niov <= LNET_MAX_IOV);
	/* payload is either all vaddrs or all pages */
	LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
	LASSERT (!in_interrupt ());

	/* tx size depends on fragment kind; iov form reserves slot 0
	 * for the message header */
	if (payload_iov != NULL)
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.paged.kiov[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (tx == NULL) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			cfs_memory_pressure_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL; /* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	if (payload_iov != NULL) {
		tx->tx_kiov = NULL;
		tx->tx_nkiov = 0;
		tx->tx_iov = tx->tx_frags.virt.iov;
		tx->tx_niov = 1 +
			lnet_extract_iov(payload_niov, &tx->tx_iov[1],
					 payload_niov, payload_iov,
					 payload_offset, payload_nob);
	} else {
		tx->tx_niov = 1;
		tx->tx_iov = &tx->tx_frags.paged.iov;
		tx->tx_kiov = tx->tx_frags.paged.kiov;
		tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
						 payload_niov, payload_kiov,
						 payload_offset, payload_nob);

		/* large paged payloads may go zero-copy */
		if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
			tx->tx_zc_capable = 1;
	}

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
	/* NOTE(review): mpflag stays 1 unless msg_vmflush saved the
	 * previous pressure state above; restoring only when the saved
	 * state was 0 looks intentional, but confirm against
	 * cfs_memory_pressure_get_and_set() semantics. */
	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);

	if (rc == 0)
		return 0;

	ksocknal_free_tx(tx);
	return -EIO;
}
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	/* Spawn a named kernel thread running fn(arg) and count it in
	 * ksnd_nthreads.  Returns 0, or the -ve errno encoded in
	 * kthread_run()'s error pointer. */
	struct task_struct *task;

	task = kthread_run(fn, arg, "%s", name);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* thread created: bump the global count under the write lock */
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads++;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return 0;
}
void
ksocknal_thread_fini (void)
{
	/* A ksocknal thread is exiting: decrement the global thread
	 * count under the write lock. */
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads--;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int
ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
{
	/* Prepare 'conn' to receive the next packet.  With
	 * nob_to_skip == 0, set up to read the next header (returns 1);
	 * otherwise set up to discard up to nob_to_skip bytes into a
	 * scratch buffer (returns 0, meaning "call me again"). */
	static char ksocknal_slop_buffer[4096];
	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto != NULL);

	if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (nob_to_skip == 0) {	 /* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		mb();		       /* racing with timeout thread */

		/* header layout depends on the negotiated protocol */
		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare lnet_hdr_t */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
			conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
			break;

		default:
			LBUG ();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/* Set up to skip as much as possible now. If there's more left
	 * (ran out of iov entries) we'll get called again */
	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	/* all skip iovs point at the same static scratch buffer */
	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -=nob;
	} while (nob_to_skip != 0 &&	/* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
	/* NOTE(review): the divisor above is sizeof(struct iovec) while
	 * the entries are struct kvec — same size on Linux, but verify
	 * this was intentional. */

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}
static int
ksocknal_process_receive (ksock_conn_t *conn)
{
	/* Advance the rx state machine of 'conn': read pending bytes,
	 * then act on the current rx state (socklnd header, LNet
	 * header, payload, or slop).  Returns 0 when a packet boundary
	 * is reached or parsing is pending, -EAGAIN on a short read,
	 * or -ve error after closing the conn. */
	lnet_hdr_t *lhdr;
	lnet_process_id_t *id;
	int rc;

	LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);

	/* NB: sched lock NOT held */
	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
	LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
		 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
		 conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
		 conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
 again:
	if (conn->ksnc_rx_nob_wanted != 0) {
		rc = ksocknal_receive(conn);

		if (rc <= 0) {
			LASSERT (rc != -EAGAIN);

			if (rc == 0)
				CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
				       conn,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);
			else if (!conn->ksnc_closing)
				CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
				       conn, rc,
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       &conn->ksnc_ipaddr,
				       conn->ksnc_port);

			/* it's not an error if conn is being closed */
			ksocknal_close_conn_and_siblings (conn,
							  (conn->ksnc_closing) ? 0 : rc);
			return (rc == 0 ? -ESHUTDOWN : rc);
		}

		if (conn->ksnc_rx_nob_wanted != 0) {
			/* short read */
			return -EAGAIN;
		}
	}
	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_KSM_HEADER:
		/* byteswap header fields if the peer has opposite
		 * endianness */
		if (conn->ksnc_flip) {
			__swab32s(&conn->ksnc_msg.ksm_type);
			__swab32s(&conn->ksnc_msg.ksm_csum);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
		}

		if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
			CERROR("%s: Unknown message type: %x\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_type);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EPROTO;
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
		    conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			/* NOOP Checksum error */
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings(conn, -EPROTO);
			return -EIO;
		}

		/* process an incoming ZC ACK (cookie in slot 1) */
		if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
			__u64 cookie = 0;

			LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);

			if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
				cookie = conn->ksnc_msg.ksm_zc_cookies[0];

			rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
					       conn->ksnc_msg.ksm_zc_cookies[1]);

			if (rc != 0) {
				CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
				       libcfs_id2str(conn->ksnc_peer->ksnp_id),
				       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
				ksocknal_new_packet(conn, 0);
				ksocknal_close_conn_and_siblings(conn, -EPROTO);
				return rc;
			}
		}

		if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
			ksocknal_new_packet (conn, 0);
			return 0;	/* NOOP is done and just return */
		}

		/* KSOCK_MSG_LNET: go read the embedded LNet header */
		conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
		conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
		conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);

		conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
		conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
		conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);

		conn->ksnc_rx_niov = 1;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;

		goto again;     /* read lnet header now */

	case SOCKNAL_RX_LNET_HEADER:
		/* unpack message header */
		conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);

		if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
			/* Userspace peer */
			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
		ksocknal_conn_addref(conn);     /* ++ref while parsing */

		rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
				&conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->ksnc_peer->ksnp_id.nid, conn, 0);
		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings (conn, rc);
			ksocknal_conn_decref(conn);
			return -EPROTO;
		}

		/* I'm racing with ksocknal_recv() */
		LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
			 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);

		if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
			return 0;

		/* ksocknal_recv() got called */
		goto again;

	case SOCKNAL_RX_LNET_PAYLOAD:
		/* payload all received */
		rc = 0;

		if (conn->ksnc_rx_nob_left == 0 &&   /* not truncating */
		    conn->ksnc_msg.ksm_csum != 0 &&  /* has checksum */
		    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
			CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
			       libcfs_id2str(conn->ksnc_peer->ksnp_id),
			       conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
			rc = -EIO;
		}

		/* answer any ZC request (cookie in slot 0) */
		if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
			LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);

			lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
			id = &conn->ksnc_peer->ksnp_id;

			rc = conn->ksnc_proto->pro_handle_zcreq(conn,
					conn->ksnc_msg.ksm_zc_cookies[0],
					*ksocknal_tunables.ksnd_nonblk_zcack ||
					le64_to_cpu(lhdr->src_nid) != id->nid);
		}

		lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);

		if (rc != 0) {
			ksocknal_new_packet(conn, 0);
			ksocknal_close_conn_and_siblings (conn, rc);
			return -EPROTO;
		}
		/* Fall through */

	case SOCKNAL_RX_SLOP:
		/* starting new packet? */
		if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
			return 0;       /* come back later */
		goto again;	     /* try to finish reading slop now */

	default:
		break;
	}

	/* Not Reached */
	LBUG ();
	return -EINVAL;		       /* keep gcc happy */
}
int
ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	       unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
	       unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	/* LND recv entry point, called from lnet_parse(): point the
	 * conn's rx fragments at the message buffers, then reschedule
	 * the conn if the scheduler parked it.  Always returns 0. */
	ksock_conn_t *conn = (ksock_conn_t *)private;
	ksock_sched_t *sched = conn->ksnc_scheduler;

	LASSERT (mlen <= rlen);
	LASSERT (niov <= LNET_MAX_IOV);

	conn->ksnc_cookie = msg;
	conn->ksnc_rx_nob_wanted = mlen;
	conn->ksnc_rx_nob_left = rlen;

	/* set up either iov or kiov fragments (never both) */
	if (mlen == 0 || iov != NULL) {
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
		conn->ksnc_rx_niov =
			lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
					 niov, iov, offset, mlen);
	} else {
		conn->ksnc_rx_niov = 0;
		conn->ksnc_rx_iov = NULL;
		conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
		conn->ksnc_rx_nkiov =
			lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
					  niov, kiov, offset, mlen);
	}

	LASSERT (mlen ==
		 lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
		 lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));

	LASSERT (conn->ksnc_rx_scheduled);

	spin_lock_bh(&sched->kss_lock);

	switch (conn->ksnc_rx_state) {
	case SOCKNAL_RX_PARSE_WAIT:
		/* scheduler parked the conn waiting for us: requeue it */
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up (&sched->kss_waitq);
		LASSERT (conn->ksnc_rx_ready);
		break;

	case SOCKNAL_RX_PARSE:
		/* scheduler hasn't noticed I'm parsing yet */
		break;
	}

	conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_conn_decref(conn);
	return 0;
}
static inline int
ksocknal_sched_cansleep(ksock_sched_t *sched)
{
	/* Nonzero when 'sched' may block: not shutting down and no
	 * conns queued for rx or tx.  Checked under kss_lock. */
	int busy;

	spin_lock_bh(&sched->kss_lock);
	busy = ksocknal_data.ksnd_shuttingdown ||
	       !list_empty(&sched->kss_rx_conns) ||
	       !list_empty(&sched->kss_tx_conns);
	spin_unlock_bh(&sched->kss_lock);

	return !busy;
}
int ksocknal_scheduler(void *arg)
{
	/* Scheduler thread main loop: alternately service one rx conn
	 * and one tx conn per iteration, sleeping when idle and
	 * yielding periodically to avoid hogging the CPU.  'arg'
	 * encodes the CPT and scheduler index. */
	struct ksock_sched_info *info;
	ksock_sched_t *sched;
	ksock_conn_t *conn;
	ksock_tx_t *tx;
	int rc;
	int nloops = 0;
	long id = (long)arg;

	info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
	sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];

	cfs_block_allsigs();

	/* CPT binding failure is non-fatal; just log it */
	rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
	if (rc != 0) {
		CERROR("Can't set CPT affinity to %d: %d\n",
		       info->ksi_cpt, rc);
	}

	spin_lock_bh(&sched->kss_lock);

	while (!ksocknal_data.ksnd_shuttingdown) {
		int did_something = 0;

		/* Ensure I progress everything semi-fairly */

		if (!list_empty (&sched->kss_rx_conns)) {
			conn = list_entry(sched->kss_rx_conns.next,
					  ksock_conn_t, ksnc_rx_list);
			list_del(&conn->ksnc_rx_list);

			LASSERT(conn->ksnc_rx_scheduled);
			LASSERT(conn->ksnc_rx_ready);

			/* clear rx_ready in case receive isn't complete.
			 * Do it BEFORE we call process_recv, since
			 * data_ready can set it any time after we release
			 * kss_lock. */
			conn->ksnc_rx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			rc = ksocknal_process_receive(conn);

			spin_lock_bh(&sched->kss_lock);

			/* I'm the only one that can clear this flag */
			LASSERT(conn->ksnc_rx_scheduled);

			/* Did process_receive get everything it wanted? */
			if (rc == 0)
				conn->ksnc_rx_ready = 1;

			if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
				/* Conn blocked waiting for ksocknal_recv()
				 * I change its state (under lock) to signal
				 * it can be rescheduled */
				conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
			} else if (conn->ksnc_rx_ready) {
				/* reschedule for rx */
				list_add_tail (&conn->ksnc_rx_list,
						   &sched->kss_rx_conns);
			} else {
				conn->ksnc_rx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}

		if (!list_empty (&sched->kss_tx_conns)) {
			LIST_HEAD     (zlist);

			/* take over any zombie noop txs to free them
			 * outside the lock */
			if (!list_empty(&sched->kss_zombie_noop_txs)) {
				list_add(&zlist,
					 &sched->kss_zombie_noop_txs);
				list_del_init(&sched->kss_zombie_noop_txs);
			}

			conn = list_entry(sched->kss_tx_conns.next,
					  ksock_conn_t, ksnc_tx_list);
			list_del (&conn->ksnc_tx_list);

			LASSERT(conn->ksnc_tx_scheduled);
			LASSERT(conn->ksnc_tx_ready);
			LASSERT(!list_empty(&conn->ksnc_tx_queue));

			tx = list_entry(conn->ksnc_tx_queue.next,
					ksock_tx_t, tx_list);

			if (conn->ksnc_tx_carrier == tx)
				ksocknal_next_tx_carrier(conn);

			/* dequeue now so empty list => more to send */
			list_del(&tx->tx_list);

			/* Clear tx_ready in case send isn't complete. Do
			 * it BEFORE we call process_transmit, since
			 * write_space can set it any time after we release
			 * kss_lock. */
			conn->ksnc_tx_ready = 0;
			spin_unlock_bh(&sched->kss_lock);

			if (!list_empty(&zlist)) {
				/* free zombie noop txs, it's fast because
				 * noop txs are just put in freelist */
				ksocknal_txlist_done(NULL, &zlist, 0);
			}

			rc = ksocknal_process_transmit(conn, tx);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list,
					 &conn->ksnc_tx_queue);
			} else {
				/* Complete send; tx -ref */
				ksocknal_tx_decref(tx);

				spin_lock_bh(&sched->kss_lock);
				/* assume space for more */
				conn->ksnc_tx_ready = 1;
			}

			if (rc == -ENOMEM) {
				/* Do nothing; after a short timeout, this
				 * conn will be reposted on kss_tx_conns. */
			} else if (conn->ksnc_tx_ready &&
				   !list_empty (&conn->ksnc_tx_queue)) {
				/* reschedule for tx */
				list_add_tail (&conn->ksnc_tx_list,
						   &sched->kss_tx_conns);
			} else {
				conn->ksnc_tx_scheduled = 0;
				/* drop my ref */
				ksocknal_conn_decref(conn);
			}

			did_something = 1;
		}
		if (!did_something ||	   /* nothing to do */
		    ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
			spin_unlock_bh(&sched->kss_lock);

			nloops = 0;

			if (!did_something) {   /* wait for something to do */
				rc = wait_event_interruptible_exclusive(
					sched->kss_waitq,
					!ksocknal_sched_cansleep(sched));
				LASSERT (rc == 0);
			} else {
				cond_resched();
			}

			spin_lock_bh(&sched->kss_lock);
		}
	}

	spin_unlock_bh(&sched->kss_lock);
	ksocknal_thread_fini();
	return 0;
}
/*
* Add connection to kss_rx_conns of scheduler
* and wakeup the scheduler.
*/
void ksocknal_read_callback (ksock_conn_t *conn)
{
	/* Socket data-ready callback: mark the conn readable and, if
	 * no scheduler is already progressing it, queue it on its
	 * scheduler's rx list and wake the scheduler. */
	ksock_sched_t *sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_rx_ready = 1;

	if (!conn->ksnc_rx_scheduled) {  /* not being progressed */
		conn->ksnc_rx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
/*
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
void ksocknal_write_callback (ksock_conn_t *conn)
{
	/* Socket write-space callback: mark the conn writable and, if
	 * it has queued packets and no scheduler is progressing it,
	 * queue it on its scheduler's tx list and wake the scheduler. */
	ksock_sched_t *sched = conn->ksnc_scheduler;

	spin_lock_bh(&sched->kss_lock);

	conn->ksnc_tx_ready = 1;

	if (!conn->ksnc_tx_scheduled &&	/* not being progressed */
	    !list_empty(&conn->ksnc_tx_queue)) {	/* packets to send */
		conn->ksnc_tx_scheduled = 1;
		/* extra ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}
static ksock_proto_t *
ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
{
	/* Map the magic/version of a received HELLO onto one of our
	 * protocol tables; NULL when unrecognised (or disabled by the
	 * protocol tunable in debug builds). */
	__u32 version = 0;

	if (hello->kshm_magic == LNET_PROTO_MAGIC)
		version = hello->kshm_version;
	else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
		version = __swab32(hello->kshm_version);

	if (version != 0) {
#if SOCKNAL_VERSION_DEBUG
		if (*ksocknal_tunables.ksnd_protocol == 1)
			return NULL;

		if (*ksocknal_tunables.ksnd_protocol == 2 &&
		    version == KSOCK_PROTO_V3)
			return NULL;
#endif
		switch (version) {
		case KSOCK_PROTO_V2:
			return &ksocknal_protocol_v2x;
		case KSOCK_PROTO_V3:
			return &ksocknal_protocol_v3x;
		default:
			return NULL;
		}
	}

	if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
		/* legacy V1 HELLO: the leading bytes form an
		 * lnet_magicversion_t */
		lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;

		CLASSERT (sizeof (lnet_magicversion_t) ==
			  offsetof (ksock_hello_msg_t, kshm_src_nid));

		if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
		    hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
			return &ksocknal_protocol_v1x;
	}

	return NULL;
}
int
ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
		     lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
	/* Fill in the source/destination/connection fields of 'hello'
	 * and send it using the conn's protocol.  Returns the
	 * protocol's send result. */
	/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
	ksock_net_t *net = (ksock_net_t *)ni->ni_data;

	LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES);

	/* rely on caller to hold a ref on socket so it wouldn't disappear */
	LASSERT (conn->ksnc_proto != NULL);

	hello->kshm_src_nid = ni->ni_nid;
	hello->kshm_dst_nid = peer_nid;
	hello->kshm_src_pid = the_lnet.ln_pid;

	hello->kshm_src_incarnation = net->ksnn_incarnation;
	hello->kshm_ctype = conn->ksnc_type;

	return conn->ksnc_proto->pro_send_hello(conn, hello);
}
static int
ksocknal_invert_type(int type)
{
	/* Return the connection type as seen from the peer's side:
	 * bulk-in pairs with bulk-out, control/any map to themselves,
	 * anything else is invalid (SOCKLND_CONN_NONE). */
	if (type == SOCKLND_CONN_ANY || type == SOCKLND_CONN_CONTROL)
		return type;

	if (type == SOCKLND_CONN_BULK_IN)
		return SOCKLND_CONN_BULK_OUT;

	if (type == SOCKLND_CONN_BULK_OUT)
		return SOCKLND_CONN_BULK_IN;

	return SOCKLND_CONN_NONE;
}
/*
 * Read and validate a peer's HELLO handshake, negotiating the wire
 * protocol and filling *peerid / *incarnation from the reply.
 * 'active' (outgoing) connections already have ksnc_proto set; passive
 * (accepted) ones learn peerid and connection type from the peer.
 */
int
ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
                     ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
                     __u64 *incarnation)
{
        /* Return < 0        fatal error
         *        0          success
         *        EALREADY   lost connection race
         *        EPROTO     protocol version mismatch
         */
        struct socket *sock = conn->ksnc_sock;
        int active = (conn->ksnc_proto != NULL);
        int timeout;
        int proto_match;
        int rc;
        ksock_proto_t *proto;
        lnet_process_id_t recv_id;

        /* socket type set on active connections - not set on passive */
        LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));

        /* passive side uses the acceptor's timeout; active the tunable */
        timeout = active ? *ksocknal_tunables.ksnd_timeout :
                           lnet_acceptor_timeout();

        rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT (rc < 0);
                return rc;
        }

        /* accept native-endian or byte-swapped magic, or the old TCP magic */
        if (hello->kshm_magic != LNET_PROTO_MAGIC &&
            hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
            hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
                /* Unexpected magic! */
                CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
                       __cpu_to_le32 (hello->kshm_magic),
                       LNET_PROTO_TCP_MAGIC,
                       &conn->ksnc_ipaddr);
                return -EPROTO;
        }

        rc = libcfs_sock_read(sock, &hello->kshm_version,
                              sizeof(hello->kshm_version), timeout);
        if (rc != 0) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT (rc < 0);
                return rc;
        }

        proto = ksocknal_parse_proto_version(hello);
        if (proto == NULL) {
                if (!active) {
                        /* unknown protocol from peer, tell peer my protocol */
                        conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                        if (*ksocknal_tunables.ksnd_protocol == 2)
                                conn->ksnc_proto = &ksocknal_protocol_v2x;
                        else if (*ksocknal_tunables.ksnd_protocol == 1)
                                conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
                }

                CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
                       conn->ksnc_proto->pro_version,
                       &conn->ksnc_ipaddr);

                return -EPROTO;
        }

        /* remember whether peer agreed with our earlier protocol choice */
        proto_match = (conn->ksnc_proto == proto);
        conn->ksnc_proto = proto;

        /* receive the rest of hello message anyway */
        rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
        if (rc != 0) {
                CERROR("Error %d reading or checking hello from from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT (rc < 0);
                return rc;
        }

        *incarnation = hello->kshm_src_incarnation;

        if (hello->kshm_src_nid == LNET_NID_ANY) {
                CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
                       &conn->ksnc_ipaddr);
                return -EPROTO;
        }

        if (!active &&
            conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
                /* Userspace NAL assigns peer process ID from socket */
                recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
                recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
        } else {
                recv_id.nid = hello->kshm_src_nid;
                recv_id.pid = hello->kshm_src_pid;
        }

        if (!active) {
                /* passive: report the peer's identity to the caller */
                *peerid = recv_id;

                /* peer determines type */
                conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
                if (conn->ksnc_type == SOCKLND_CONN_NONE) {
                        CERROR("Unexpected type %d from %s ip %pI4h\n",
                               hello->kshm_ctype, libcfs_id2str(*peerid),
                               &conn->ksnc_ipaddr);
                        return -EPROTO;
                }

                return 0;
        }

        /* active: the peer must be who we thought we connected to */
        if (peerid->pid != recv_id.pid ||
            peerid->nid != recv_id.nid) {
                LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
                                   libcfs_id2str(*peerid),
                                   &conn->ksnc_ipaddr,
                                   libcfs_id2str(recv_id));
                return -EPROTO;
        }

        if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
                /* Possible protocol mismatch or I lost the connection race */
                /* NB: positive (non-fatal) return values by design */
                return proto_match ? EALREADY : EPROTO;
        }

        if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
                CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
                       conn->ksnc_type, libcfs_id2str(*peerid),
                       &conn->ksnc_ipaddr,
                       hello->kshm_ctype);
                return -EPROTO;
        }

        return 0;
}
/*
 * Try to establish every connection type still wanted on 'route'.
 * Returns non-zero if the route needs to be re-queued (lost a
 * connection race / must renegotiate), 0 otherwise.  On failure the
 * route's retry backoff is doubled (clamped to the tunable range) and
 * any blocked txs are failed.
 */
static int
ksocknal_connect (ksock_route_t *route)
{
        LIST_HEAD (zombies);
        ksock_peer_t *peer = route->ksnr_peer;
        int type;
        int wanted;
        struct socket *sock;
        unsigned long deadline;
        int retry_later = 0;
        int rc = 0;

        /* overall deadline for this whole connect attempt */
        deadline = cfs_time_add(cfs_time_current(),
                                cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        LASSERT (route->ksnr_scheduled);
        LASSERT (!route->ksnr_connecting);

        route->ksnr_connecting = 1;

        for (;;) {
                wanted = ksocknal_route_mask() & ~route->ksnr_connected;

                /* stop connecting if peer/route got closed under me, or
                 * route got connected while queued */
                if (peer->ksnp_closing || route->ksnr_deleted ||
                    wanted == 0) {
                        retry_later = 0;
                        break;
                }

                /* reschedule if peer is connecting to me */
                if (peer->ksnp_accepting > 0) {
                        CDEBUG(D_NET,
                               "peer %s(%d) already connecting to me, retry later.\n",
                               libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
                        retry_later = 1;
                }

                if (retry_later) /* needs reschedule */
                        break;

                /* pick the highest-priority connection type still wanted */
                if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
                        type = SOCKLND_CONN_ANY;
                } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
                        type = SOCKLND_CONN_CONTROL;
                } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
                        type = SOCKLND_CONN_BULK_IN;
                } else {
                        LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
                        type = SOCKLND_CONN_BULK_OUT;
                }

                /* drop the lock across the (blocking) connect */
                write_unlock_bh(&ksocknal_data.ksnd_global_lock);

                if (cfs_time_aftereq(cfs_time_current(), deadline)) {
                        rc = -ETIMEDOUT;
                        lnet_connect_console_error(rc, peer->ksnp_id.nid,
                                                   route->ksnr_ipaddr,
                                                   route->ksnr_port);
                        goto failed;
                }

                rc = lnet_connect(&sock, peer->ksnp_id.nid,
                                  route->ksnr_myipaddr,
                                  route->ksnr_ipaddr, route->ksnr_port);
                if (rc != 0)
                        goto failed;

                rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
                if (rc < 0) {
                        lnet_connect_console_error(rc, peer->ksnp_id.nid,
                                                   route->ksnr_ipaddr,
                                                   route->ksnr_port);
                        goto failed;
                }

                /* A +ve RC means I have to retry because I lost the connection
                 * race or I have to renegotiate protocol version */
                retry_later = (rc != 0);
                if (retry_later)
                        CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
                               libcfs_nid2str(peer->ksnp_id.nid));

                write_lock_bh(&ksocknal_data.ksnd_global_lock);
        }

        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;

        if (retry_later) {
                /* re-queue for attention; this frees me up to handle
                 * the peer's incoming connection request */

                if (rc == EALREADY ||
                    (rc == 0 && peer->ksnp_accepting > 0)) {
                        /* We want to introduce a delay before next
                         * attempt to connect if we lost conn race,
                         * but the race is resolved quickly usually,
                         * so min_reconnectms should be good heuristic */
                        route->ksnr_retry_interval =
                                cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
                        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                                           route->ksnr_retry_interval);
                }

                ksocknal_launch_connection_locked(route);
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
        return retry_later;

 failed:
        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;

        /* This is a retry rather than a new connection */
        route->ksnr_retry_interval *= 2;
        /* clamp the exponential backoff to [min, max]_reconnectms */
        route->ksnr_retry_interval =
                max(route->ksnr_retry_interval,
                    cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
        route->ksnr_retry_interval =
                min(route->ksnr_retry_interval,
                    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);

        LASSERT (route->ksnr_retry_interval != 0);
        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                           route->ksnr_retry_interval);

        if (!list_empty(&peer->ksnp_tx_queue) &&
            peer->ksnp_accepting == 0 &&
            ksocknal_find_connecting_route_locked(peer) == NULL) {
                ksock_conn_t *conn;

                /* ksnp_tx_queue is queued on a conn on successful
                 * connection for V1.x and V2.x */
                if (!list_empty (&peer->ksnp_conns)) {
                        conn = list_entry(peer->ksnp_conns.next,
                                          ksock_conn_t, ksnc_list);
                        LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
                }

                /* take all the blocked packets while I've got the lock and
                 * complete below... */
                list_splice_init(&peer->ksnp_tx_queue, &zombies);
        }

#if 0           /* irrelevant with only eager routes */
        if (!route->ksnr_deleted) {
                /* make this route least-favourite for re-selection */
                list_del(&route->ksnr_list);
                list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
        }
#endif
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_peer_failed(peer);
        /* fail the blocked txs outside the lock */
        ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
        return 0;
}
/*
* check whether we need to create more connds.
* It will try to create new thread if it's necessary, @timeout can
* be updated if failed to create, so caller wouldn't keep try while
* running out of resource.
*/
static int
ksocknal_connd_check_start(long sec, long *timeout)
{
        /* NB: called with ksnd_connd_lock held; drops and retakes it
         * around the (sleeping) thread creation.  Returns 1 whenever
         * the lock was dropped so the caller revalidates its state. */
        char name[16];
        int rc;
        int total = ksocknal_data.ksnd_connd_starting +
                    ksocknal_data.ksnd_connd_running;

        if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
                /* still in initializing */
                return 0;
        }

        if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
            total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
                /* can't create more connd, or still have enough
                 * threads to handle more connecting */
                return 0;
        }

        if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
                /* no pending connecting request */
                return 0;
        }

        if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
                /* may run out of resource, retry later */
                *timeout = cfs_time_seconds(1);
                return 0;
        }

        if (ksocknal_data.ksnd_connd_starting > 0) {
                /* serialize starting to avoid flood */
                return 0;
        }

        ksocknal_data.ksnd_connd_starting_stamp = sec;
        ksocknal_data.ksnd_connd_starting++;
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);

        /* NB: total is the next id */
        snprintf(name, sizeof(name), "socknal_cd%02d", total);
        rc = ksocknal_thread_start(ksocknal_connd, NULL, name);

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
        if (rc == 0)
                return 1;

        /* we tried ... */
        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
        ksocknal_data.ksnd_connd_starting--;
        ksocknal_data.ksnd_connd_failed_stamp = get_seconds();

        /* still return 1: the lock was dropped */
        return 1;
}
/*
* check whether current thread can exit, it will return 1 if there are too
* many threads and no creating in past 120 seconds.
* Also, this function may update @timeout to make caller come back
* again to recheck these conditions.
*/
static int
ksocknal_connd_check_stop(long sec, long *timeout)
{
        int remaining;

        /* don't shrink the pool while still initializing */
        if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL))
                return 0;

        /* a thread start is in flight; let it settle first */
        if (ksocknal_data.ksnd_connd_starting > 0)
                return 0;

        /* already at (or below) the configured minimum: can't shrink */
        if (ksocknal_data.ksnd_connd_running <=
            *ksocknal_tunables.ksnd_nconnds)
                return 0;

        /* seconds left in the 120s quiet period since the last start */
        remaining = (int)(ksocknal_data.ksnd_connd_starting_stamp +
                          SOCKNAL_CONND_TIMEOUT - sec);

        if (remaining > 0) {
                /* come back when the quiet period expires */
                *timeout = cfs_time_seconds(remaining);
                return 0;
        }

        *timeout = cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);

        /* no thread created in the past 120 seconds: stop this thread
         * if there are still spare ones beyond the reserve */
        return ksocknal_data.ksnd_connd_running >
               ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
/* Go through connd_routes queue looking for a route that we can process
 * right now, @timeout_p can be updated if we need to come back later */
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
        ksock_route_t *route;
        unsigned long now;

        now = cfs_time_current();

        /* connd_routes can contain both pending and ordinary routes */
        list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
                             ksnr_connd_list) {
                /* ready now: never retried, or its backoff has expired */
                if (route->ksnr_retry_interval == 0 ||
                    cfs_time_aftereq(now, route->ksnr_timeout))
                        return route;

                /* not ready: shrink the caller's sleep so we wake when
                 * the earliest backoff expires (the int casts keep the
                 * comparison sane across jiffy wrap-around) */
                if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
                    (int)*timeout_p > (int)(route->ksnr_timeout - now))
                        *timeout_p = (int)(route->ksnr_timeout - now);
        }

        return NULL;
}
/*
 * Connection daemon thread: services connections accepted by the
 * listener and initiates outgoing connections for queued routes.
 * The pool of connd threads grows/shrinks via
 * ksocknal_connd_check_start()/ksocknal_connd_check_stop().
 */
int
ksocknal_connd (void *arg)
{
        spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
        ksock_connreq_t *cr;
        wait_queue_t wait;
        int nloops = 0;
        int cons_retry = 0;    /* consecutive failed reconnect attempts */

        cfs_block_allsigs ();

        init_waitqueue_entry(&wait, current);

        spin_lock_bh(connd_lock);

        LASSERT(ksocknal_data.ksnd_connd_starting > 0);
        ksocknal_data.ksnd_connd_starting--;
        ksocknal_data.ksnd_connd_running++;

        while (!ksocknal_data.ksnd_shuttingdown) {
                ksock_route_t *route = NULL;
                long sec = get_seconds();
                long timeout = MAX_SCHEDULE_TIMEOUT;
                int dropped_lock = 0;   /* did any work drop connd_lock? */

                if (ksocknal_connd_check_stop(sec, &timeout)) {
                        /* wakeup another one to check stop */
                        wake_up(&ksocknal_data.ksnd_connd_waitq);
                        break;
                }

                if (ksocknal_connd_check_start(sec, &timeout)) {
                        /* created new thread */
                        dropped_lock = 1;
                }

                if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
                        /* Connection accepted by the listener */
                        cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
                                        ksock_connreq_t, ksncr_list);

                        list_del(&cr->ksncr_list);
                        spin_unlock_bh(connd_lock);
                        dropped_lock = 1;

                        ksocknal_create_conn(cr->ksncr_ni, NULL,
                                             cr->ksncr_sock, SOCKLND_CONN_NONE);
                        lnet_ni_decref(cr->ksncr_ni);
                        LIBCFS_FREE(cr, sizeof(*cr));

                        spin_lock_bh(connd_lock);
                }

                /* Only handle an outgoing connection request if there
                 * is a thread left to handle incoming connections and
                 * create new connd */
                if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
                    ksocknal_data.ksnd_connd_running) {
                        route = ksocknal_connd_get_route_locked(&timeout);
                }
                if (route != NULL) {
                        list_del (&route->ksnr_connd_list);
                        ksocknal_data.ksnd_connd_connecting++;
                        spin_unlock_bh(connd_lock);
                        dropped_lock = 1;

                        if (ksocknal_connect(route)) {
                                /* consecutive retry */
                                if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
                                        CWARN("massive consecutive re-connecting to %pI4h\n",
                                              &route->ksnr_ipaddr);
                                        cons_retry = 0;
                                }
                        } else {
                                cons_retry = 0;
                        }

                        ksocknal_route_decref(route);

                        spin_lock_bh(connd_lock);
                        ksocknal_data.ksnd_connd_connecting--;
                }

                if (dropped_lock) {
                        /* did real work; yield the CPU periodically */
                        if (++nloops < SOCKNAL_RESCHED)
                                continue;
                        spin_unlock_bh(connd_lock);
                        nloops = 0;
                        cond_resched();
                        spin_lock_bh(connd_lock);
                        continue;
                }

                /* Nothing to do for 'timeout' */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_unlock_bh(connd_lock);

                nloops = 0;
                schedule_timeout(timeout);

                remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
        ksocknal_data.ksnd_connd_running--;
        spin_unlock_bh(connd_lock);

        ksocknal_thread_fini();
        return 0;
}
/*
 * Find a connection on 'peer' that has hit a socket error or missed an
 * rx/tx deadline.  Called with a shared (read) lock on
 * ksnd_global_lock.  Returns the conn with a reference added (caller
 * must decref), or NULL when every connection is healthy.
 */
static ksock_conn_t *
ksocknal_find_timed_out_conn (ksock_peer_t *peer)
{
        /* We're called with a shared lock on ksnd_global_lock */
        ksock_conn_t *conn;
        struct list_head *ctmp;

        list_for_each (ctmp, &peer->ksnp_conns) {
                int error;

                conn = list_entry (ctmp, ksock_conn_t, ksnc_list);

                /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                LASSERT (!conn->ksnc_closing);

                /* SOCK_ERROR will reset error code of socket in
                 * some platform (like Darwin8.x) */
                error = conn->ksnc_sock->sk->sk_err;
                if (error != 0) {
                        ksocknal_conn_addref(conn);

                        switch (error) {
                        case ECONNRESET:
                                CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
                                        libcfs_id2str(peer->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        case ETIMEDOUT:
                                CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
                                        libcfs_id2str(peer->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        default:
                                /* fixed: the format previously ended
                                 * "(%pI4h:%d\n" with an unbalanced '(' */
                                CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d)\n",
                                        error,
                                        libcfs_id2str(peer->ksnp_id),
                                        &conn->ksnc_ipaddr,
                                        conn->ksnc_port);
                                break;
                        }

                        return conn;
                }

                if (conn->ksnc_rx_started &&
                    cfs_time_aftereq(cfs_time_current(),
                                     conn->ksnc_rx_deadline)) {
                        /* Timed out incomplete incoming message */
                        ksocknal_conn_addref(conn);
                        CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
                                libcfs_id2str(peer->ksnp_id),
                                &conn->ksnc_ipaddr,
                                conn->ksnc_port,
                                conn->ksnc_rx_state,
                                conn->ksnc_rx_nob_wanted,
                                conn->ksnc_rx_nob_left);
                        return conn;
                }

                if ((!list_empty(&conn->ksnc_tx_queue) ||
                     conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
                    cfs_time_aftereq(cfs_time_current(),
                                     conn->ksnc_tx_deadline)) {
                        /* Timed out messages queued for sending or
                         * buffered in the socket's send buffer */
                        ksocknal_conn_addref(conn);
                        CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
                                libcfs_id2str(peer->ksnp_id),
                                &conn->ksnc_ipaddr,
                                conn->ksnc_port);
                        return conn;
                }
        }

        return NULL;
}
/*
 * Fail every tx at the head of 'peer's queue whose deadline has passed.
 * Stops at the first unexpired tx (assumes the queue is effectively
 * deadline-ordered — TODO confirm against the enqueue path).
 */
static inline void
ksocknal_flush_stale_txs(ksock_peer_t *peer)
{
        ksock_tx_t *tx;
        LIST_HEAD (stale_txs);

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        while (!list_empty (&peer->ksnp_tx_queue)) {
                tx = list_entry (peer->ksnp_tx_queue.next,
                                 ksock_tx_t, tx_list);

                if (!cfs_time_aftereq(cfs_time_current(),
                                      tx->tx_deadline))
                        break;

                list_del (&tx->tx_list);
                list_add_tail (&tx->tx_list, &stale_txs);
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        /* complete the stale txs (flagged as errors) outside the lock */
        ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
}
/*
 * Send a keepalive PING to 'peer' if its V3 connection has been idle
 * longer than the keepalive tunable.  Called with ksnd_global_lock held
 * for read; drops and retakes it when it actually sends, so callers
 * must revalidate list positions on any non-zero return.
 *
 * Returns 0 when nothing needed doing, 1 when a keepalive was launched,
 * -ve error otherwise.
 */
static int
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
{
        ksock_sched_t *sched;
        ksock_conn_t *conn;
        ksock_tx_t *tx;

        if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
                return 0;

        /* only the V3 protocol has keepalive PINGs */
        if (peer->ksnp_proto != &ksocknal_protocol_v3x)
                return 0;

        if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
            time_before(cfs_time_current(),
                        cfs_time_add(peer->ksnp_last_alive,
                                     cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
                return 0;

        if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
                return 0;

        /* retry 10 secs later, so we wouldn't put pressure
         * on this peer if we failed to send keepalive this time */
        peer->ksnp_send_keepalive = cfs_time_shift(10);

        conn = ksocknal_find_conn_locked(peer, NULL, 1);
        if (conn != NULL) {
                sched = conn->ksnc_scheduler;

                spin_lock_bh(&sched->kss_lock);
                if (!list_empty(&conn->ksnc_tx_queue)) {
                        spin_unlock_bh(&sched->kss_lock);
                        /* there is an queued ACK, don't need keepalive */
                        return 0;
                }

                spin_unlock_bh(&sched->kss_lock);
        }

        /* must drop the read lock to allocate/launch the noop tx */
        read_unlock(&ksocknal_data.ksnd_global_lock);

        /* cookie = 1 is reserved for keepalive PING */
        tx = ksocknal_alloc_tx_noop(1, 1);
        if (tx == NULL) {
                read_lock(&ksocknal_data.ksnd_global_lock);
                return -ENOMEM;
        }

        if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
                read_lock(&ksocknal_data.ksnd_global_lock);
                return 1;
        }

        ksocknal_free_tx(tx);
        read_lock(&ksocknal_data.ksnd_global_lock);

        return -EIO;
}
/*
 * Scan hash bucket 'idx' of the peer table for timed-out connections,
 * stale queued txs and stale zero-copy requests.  Whenever any action
 * requires dropping the shared lock, the whole bucket is rescanned
 * ('again') because list positions may no longer be valid.
 */
static void
ksocknal_check_peer_timeouts (int idx)
{
        struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
        ksock_peer_t *peer;
        ksock_conn_t *conn;
        ksock_tx_t *tx;

 again:
        /* NB. We expect to have a look at all the peers and not find any
         * connections to time out, so we just use a shared lock while we
         * take a look... */
        read_lock(&ksocknal_data.ksnd_global_lock);

        list_for_each_entry(peer, peers, ksnp_list) {
                unsigned long deadline = 0;
                int resid = 0;
                int n = 0;

                if (ksocknal_send_keepalive_locked(peer) != 0) {
                        /* lock was dropped inside; restart the scan */
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        goto again;
                }

                conn = ksocknal_find_timed_out_conn (peer);

                if (conn != NULL) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);

                        ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);

                        /* NB we won't find this one again, but we can't
                         * just proceed with the next peer, since we dropped
                         * ksnd_global_lock and it might be dead already! */
                        ksocknal_conn_decref(conn);
                        goto again;
                }

                /* we can't process stale txs right here because we're
                 * holding only shared lock */
                if (!list_empty (&peer->ksnp_tx_queue)) {
                        /* NB shadows the outer 'tx' deliberately */
                        ksock_tx_t *tx =
                                list_entry (peer->ksnp_tx_queue.next,
                                            ksock_tx_t, tx_list);

                        if (cfs_time_aftereq(cfs_time_current(),
                                             tx->tx_deadline)) {
                                ksocknal_peer_addref(peer);
                                read_unlock(&ksocknal_data.ksnd_global_lock);

                                ksocknal_flush_stale_txs(peer);

                                ksocknal_peer_decref(peer);
                                goto again;
                        }
                }

                if (list_empty(&peer->ksnp_zc_req_list))
                        continue;

                /* count expired (and not-closing) zero-copy requests */
                spin_lock(&peer->ksnp_lock);
                list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
                        if (!cfs_time_aftereq(cfs_time_current(),
                                              tx->tx_deadline))
                                break;
                        /* ignore the TX if connection is being closed */
                        if (tx->tx_conn->ksnc_closing)
                                continue;
                        n++;
                }

                if (n == 0) {
                        spin_unlock(&peer->ksnp_lock);
                        continue;
                }

                /* report on (and close) the conn of the oldest stale ZC req */
                tx = list_entry(peer->ksnp_zc_req_list.next,
                                ksock_tx_t, tx_zc_list);
                deadline = tx->tx_deadline;
                resid = tx->tx_resid;
                conn = tx->tx_conn;
                ksocknal_conn_addref(conn);

                spin_unlock(&peer->ksnp_lock);
                read_unlock(&ksocknal_data.ksnd_global_lock);

                CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
                       n, libcfs_nid2str(peer->ksnp_id.nid), tx,
                       cfs_duration_sec(cfs_time_current() - deadline),
                       resid, conn->ksnc_sock->sk->sk_wmem_queued);

                ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
                ksocknal_conn_decref(conn);
                goto again;
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}
/*
 * Reaper thread: terminates deathrow connections, destroys zombies,
 * reschedules connections that stalled on ENOMEM and periodically
 * sweeps the peer table for timeouts.
 */
int
ksocknal_reaper (void *arg)
{
        wait_queue_t wait;
        ksock_conn_t *conn;
        ksock_sched_t *sched;
        struct list_head enomem_conns;
        int nenomem_conns;
        long timeout;
        int i;
        int peer_index = 0;
        unsigned long deadline = cfs_time_current();

        cfs_block_allsigs ();

        INIT_LIST_HEAD(&enomem_conns);
        init_waitqueue_entry(&wait, current);

        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

        while (!ksocknal_data.ksnd_shuttingdown) {
                if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
                        conn = list_entry (ksocknal_data.ksnd_deathrow_conns.next,
                                           ksock_conn_t, ksnc_list);
                        list_del (&conn->ksnc_list);

                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

                        ksocknal_terminate_conn(conn);
                        ksocknal_conn_decref(conn);

                        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
                        continue;
                }

                if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
                        conn = list_entry (ksocknal_data.ksnd_zombie_conns.next,
                                           ksock_conn_t, ksnc_list);
                        list_del (&conn->ksnc_list);

                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

                        ksocknal_destroy_conn(conn);

                        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
                        continue;
                }

                if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
                        /* steal the whole enomem list: insert our local
                         * head into it, then detach the global head */
                        list_add(&enomem_conns,
                                 &ksocknal_data.ksnd_enomem_conns);
                        list_del_init(&ksocknal_data.ksnd_enomem_conns);
                }

                spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

                /* reschedule all the connections that stalled with ENOMEM... */
                nenomem_conns = 0;
                while (!list_empty (&enomem_conns)) {
                        conn = list_entry (enomem_conns.next,
                                           ksock_conn_t, ksnc_tx_list);
                        list_del (&conn->ksnc_tx_list);

                        sched = conn->ksnc_scheduler;

                        spin_lock_bh(&sched->kss_lock);

                        LASSERT(conn->ksnc_tx_scheduled);
                        conn->ksnc_tx_ready = 1;
                        list_add_tail(&conn->ksnc_tx_list,
                                      &sched->kss_tx_conns);
                        wake_up(&sched->kss_waitq);

                        spin_unlock_bh(&sched->kss_lock);
                        nenomem_conns++;
                }

                /* careful with the jiffy wrap... */
                while ((timeout = cfs_time_sub(deadline,
                                               cfs_time_current())) <= 0) {
                        const int n = 4;
                        const int p = 1;
                        int chunk = ksocknal_data.ksnd_peer_hash_size;

                        /* Time to check for timeouts on a few more peers: I do
                         * checks every 'p' seconds on a proportion of the peer
                         * table and I need to check every connection 'n' times
                         * within a timeout interval, to ensure I detect a
                         * timeout on any connection within (n+1)/n times the
                         * timeout interval. */

                        if (*ksocknal_tunables.ksnd_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        *ksocknal_tunables.ksnd_timeout;
                        if (chunk == 0)
                                chunk = 1;

                        for (i = 0; i < chunk; i++) {
                                ksocknal_check_peer_timeouts (peer_index);
                                peer_index = (peer_index + 1) %
                                             ksocknal_data.ksnd_peer_hash_size;
                        }

                        deadline = cfs_time_add(deadline, cfs_time_seconds(p));
                }

                if (nenomem_conns != 0) {
                        /* Reduce my timeout if I rescheduled ENOMEM conns.
                         * This also prevents me getting woken immediately
                         * if any go back on my enomem list. */
                        timeout = SOCKNAL_ENOMEM_RETRY;
                }
                ksocknal_data.ksnd_reaper_waketime =
                        cfs_time_add(cfs_time_current(), timeout);

                set_current_state (TASK_INTERRUPTIBLE);
                add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);

                if (!ksocknal_data.ksnd_shuttingdown &&
                    list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
                    list_empty (&ksocknal_data.ksnd_zombie_conns))
                        schedule_timeout(timeout);

                set_current_state (TASK_RUNNING);
                remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);

                spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
        }

        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);

        ksocknal_thread_fini();
        return 0;
}
| gpl-2.0 |
KutuSystems/linux | drivers/iommu/omap-iommu.c | 227 | 30219 | /*
* omap iommu: tlb and pagetable primitives
*
* Copyright (C) 2008-2010 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <asm/cacheflush.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h"
#include "omap-iommu.h"
#define to_iommu(dev) \
((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable: the page table
 * @iommu_dev: an omap iommu device attached to this domain. only a single
 * iommu device can be attached for now.
 * @dev: Device using this domain.
 * @lock: domain lock, should be taken when attaching/detaching
 * @domain: generic domain handle embedded for the IOMMU core
 */
struct omap_iommu_domain {
        u32 *pgtable;
        struct omap_iommu *iommu_dev;
        struct device *dev;
        spinlock_t lock;
        struct iommu_domain domain;
};
#define MMU_LOCK_BASE_SHIFT 10
#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x) \
((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_VICT_SHIFT 4
#define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x) \
((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
        /* 'domain' is embedded in omap_iommu_domain, so just step back */
        return container_of(dom, struct omap_iommu_domain, domain);
}
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
**/
void omap_iommu_save_ctx(struct device *dev)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        u32 *ctx = obj->ctx;
        int idx;

        /* snapshot every MMU register word into the context buffer */
        for (idx = 0; idx < (MMU_REG_SIZE / sizeof(u32)); idx++) {
                ctx[idx] = iommu_read_reg(obj, idx * sizeof(u32));
                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, idx,
                        ctx[idx]);
        }
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
* omap_iommu_restore_ctx - Restore registers for pm off-mode support
* @dev: client device
**/
void omap_iommu_restore_ctx(struct device *dev)
{
        struct omap_iommu *obj = dev_to_omap_iommu(dev);
        u32 *ctx = obj->ctx;
        int idx;

        /* write the saved register words back in order */
        for (idx = 0; idx < (MMU_REG_SIZE / sizeof(u32)); idx++) {
                iommu_write_reg(obj, ctx[idx], idx * sizeof(u32));
                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, idx,
                        ctx[idx]);
        }
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
        u32 mask;

        /* only instances with a DSP_SYS syscon mapping need this */
        if (!obj->syscfg)
                return;

        mask = 1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT);
        regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask,
                           enable ? mask : 0);
}
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
        u32 cntl = iommu_read_reg(obj, MMU_CNTL);

        /* with hardware table-walk on, only fault IRQs are wanted;
         * without it, TLB misses must interrupt for a software refill */
        iommu_write_reg(obj, on ? MMU_IRQ_TWL_MASK : MMU_IRQ_TLB_MISS_MASK,
                        MMU_IRQENABLE);

        cntl &= ~MMU_CNTL_MASK;
        cntl |= MMU_CNTL_MMU_EN;
        if (on)
                cntl |= MMU_CNTL_TWL_EN;

        iommu_write_reg(obj, cntl, MMU_CNTL);
}
/*
 * Program the page-table base and turn the MMU on.  Returns -EINVAL if
 * the first-level table is missing or not 16KB-aligned (virtually or
 * physically), 0 on success.
 */
static int omap2_iommu_enable(struct omap_iommu *obj)
{
        u32 l, pa;

        if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
                return -EINVAL;

        pa = virt_to_phys(obj->iopgd);
        if (!IS_ALIGNED(pa, SZ_16K))
                return -EINVAL;

        l = iommu_read_reg(obj, MMU_REVISION);
        dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
                 (l >> 4) & 0xf, l & 0xf);

        /* set the translation table base before enabling translation */
        iommu_write_reg(obj, pa, MMU_TTB);

        dra7_cfg_dspsys_mmu(obj, true);

        if (obj->has_bus_err_back)
                iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

        /* finally enable the MMU with hardware table-walk */
        __iommu_set_twl(obj, true);

        return 0;
}
static void omap2_iommu_disable(struct omap_iommu *obj)
{
        u32 cntl = iommu_read_reg(obj, MMU_CNTL);

        /* clear MMU_EN/TWL_EN to switch translation off */
        cntl &= ~MMU_CNTL_MASK;
        iommu_write_reg(obj, cntl, MMU_CNTL);

        /* then drop the DRA7 DSP syscon enable, when present */
        dra7_cfg_dspsys_mmu(obj, false);

        dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
/*
 * Bring the IOMMU out of reset (when the platform provides a hook),
 * power it up via runtime PM and enable translation.
 * Returns 0 on success or a negative errno.
 */
static int iommu_enable(struct omap_iommu *obj)
{
        int err;
        struct platform_device *pdev = to_platform_device(obj->dev);
        struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

        if (pdata && pdata->deassert_reset) {
                err = pdata->deassert_reset(pdev, pdata->reset_name);
                if (err) {
                        dev_err(obj->dev, "deassert_reset failed: %d\n", err);
                        return err;
                }
        }

        pm_runtime_get_sync(obj->dev);

        /* return directly: the old 'err = ...; return err;' round-trip
         * added nothing */
        return omap2_iommu_enable(obj);
}
/*
 * Counterpart of iommu_enable(): disable translation, release the
 * runtime-PM reference, then put the IP block back into reset when the
 * platform provides a hook.
 */
static void iommu_disable(struct omap_iommu *obj)
{
        struct platform_device *pdev = to_platform_device(obj->dev);
        struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

        omap2_iommu_disable(obj);

        pm_runtime_put_sync(obj->dev);

        if (pdata && pdata->assert_reset)
                pdata->assert_reset(pdev, pdata->reset_name);
}
/*
* TLB operations
*/
/*
 * Extract the (page-size-aligned) virtual address tag from a CAM entry.
 * The old code computed get_cam_va_mask(cr->cam & page_size), a
 * redundant double-mask: page_size is already cr->cam & PGSZ_MASK, so
 * ANDing it with cr->cam again is a no-op.  Pass page_size directly.
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
        u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
        u32 mask = get_cam_va_mask(page_size);

        return cr->cam & mask;
}
/*
 * Pack an iotlb_entry's mixed/endian/element-size fields into PTE
 * attribute bits.  For page sizes other than 4K/64K the whole field is
 * shifted up by 6 (presumably the section/supersection attribute
 * layout — TODO confirm against the OMAP MMU TRM).
 */
static u32 get_iopte_attr(struct iotlb_entry *e)
{
        u32 attr;

        attr = e->mixed << 5;
        attr |= e->endian;
        attr |= e->elsz >> 3;
        attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
                   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
        return attr;
}
/*
 * Read and acknowledge any pending MMU fault.  Stores the faulting
 * device address in *da (0 when nothing is pending) and returns the
 * masked IRQ status bits.
 */
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
        u32 status = iommu_read_reg(obj, MMU_IRQSTATUS) & MMU_IRQ_MASK;

        if (!status) {
                *da = 0;
                return 0;
        }

        *da = iommu_read_reg(obj, MMU_FAULT_AD);

        /* write-back acknowledges the interrupt(s) we just reported */
        iommu_write_reg(obj, status, MMU_IRQSTATUS);

        return status;
}
/* Read MMU_LOCK and unpack its base/victim index fields into *l. */
void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
        u32 lock = iommu_read_reg(obj, MMU_LOCK);

        l->base = MMU_LOCK_BASE(lock);
        l->vict = MMU_LOCK_VICT(lock);
}
/* Pack *l's base/victim indices back into MMU_LOCK's bitfields. */
void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
        u32 lock = (l->base << MMU_LOCK_BASE_SHIFT) |
                   (l->vict << MMU_LOCK_VICT_SHIFT);

        iommu_write_reg(obj, lock, MMU_LOCK);
}
/* Latch the CAM/RAM pair of the currently selected (victim) TLB entry. */
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
        cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
        cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
/*
 * Load @cr into the TLB entry selected by the lock's victim index:
 * CAM (marked valid) and RAM are written first, then the entry flush
 * and LD_TLB triggers fire.  NOTE(review): the write sequence looks
 * hardware-mandated — confirm against the OMAP MMU TRM before reordering.
 */
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
        iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
        iommu_write_reg(obj, cr->ram, MMU_RAM);

        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
        iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
struct cr_regs cr;
struct iotlb_lock l;
iotlb_lock_get(obj, &l);
l.vict = n;
iotlb_lock_set(obj, &l);
iotlb_read_cr(obj, &cr);
return cr;
}
#ifdef PREFETCH_IOTLB
/*
 * Allocate and fill a cr_regs pair from an iommu tlb entry.
 * Returns NULL when @e is NULL, ERR_PTR(-EINVAL) when the device
 * address is not aligned to the entry's page size, ERR_PTR(-ENOMEM) on
 * allocation failure.  The caller kfree()s the result.
 */
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
                                      struct iotlb_entry *e)
{
        struct cr_regs *cr;

        if (!e)
                return NULL;

        if (e->da & ~(get_cam_va_mask(e->pgsz))) {
                dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
                        e->da);
                return ERR_PTR(-EINVAL);
        }

        cr = kmalloc(sizeof(*cr), GFP_KERNEL);
        if (!cr)
                return ERR_PTR(-ENOMEM);

        /* CAM: VA tag + preserve/page-size/valid; RAM: PA + attributes */
        cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
        cr->ram = e->pa | e->endian | e->elsz | e->mixed;

        return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EBUSY when no TLB
 * slot is available, or the error from iotlb_alloc_cr().
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
        int err = 0;
        struct iotlb_lock l;
        struct cr_regs *cr;

        if (!obj || !obj->nr_tlb_entries || !e)
                return -EINVAL;

        pm_runtime_get_sync(obj->dev);

        iotlb_lock_get(obj, &l);
        if (l.base == obj->nr_tlb_entries) {
                dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
                err = -EBUSY;
                goto out;
        }
        if (!e->prsvd) {
                int i;
                struct cr_regs tmp;

                /* find a free (invalid) victim slot */
                for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
                        if (!iotlb_cr_valid(&tmp))
                                break;

                if (i == obj->nr_tlb_entries) {
                        dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
                        err = -EBUSY;
                        goto out;
                }

                iotlb_lock_get(obj, &l);
        } else {
                /* preserved entries are loaded at the lock base slot */
                l.vict = l.base;
                iotlb_lock_set(obj, &l);
        }

        cr = iotlb_alloc_cr(obj, e);
        if (IS_ERR(cr)) {
                pm_runtime_put_sync(obj->dev);
                return PTR_ERR(cr);
        }

        iotlb_load_cr(obj, cr);
        kfree(cr);

        /* a preserved entry raises the lock base so it survives flushes */
        if (e->prsvd)
                l.base++;
        /* increment victim for next tlb load */
        if (++l.vict == obj->nr_tlb_entries)
                l.vict = l.base;
        iotlb_lock_set(obj, &l);
out:
        pm_runtime_put_sync(obj->dev);
        return err;
}
#else /* !PREFETCH_IOTLB */
/* TLB prefetch disabled: loading is a no-op that always succeeds;
 * the hardware table walker fills the TLB on demand instead. */
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
        return 0;
}
#endif /* !PREFETCH_IOTLB */
/* Thin wrapper: prefetching an entry is just loading it (or a no-op
 * when PREFETCH_IOTLB is not defined). */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
        return load_iotlb_entry(obj, e);
}
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
        int i;
        struct cr_regs cr;

        pm_runtime_get_sync(obj->dev);

        for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
                u32 start;
                size_t bytes;

                if (!iotlb_cr_valid(&cr))
                        continue;

                start = iotlb_cr_to_virt(&cr);
                bytes = iopgsz_to_bytes(cr.cam & 3);

                if ((start <= da) && (da < start + bytes)) {
                        /* fixed: 'bytes' is size_t, so %zx (was %x) */
                        dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
                                __func__, start, da, bytes);
                        iotlb_load_cr(obj, &cr);
                        iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
                        break;
                }
        }
        pm_runtime_put_sync(obj->dev);

        if (i == obj->nr_tlb_entries)
                dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
* flush_iotlb_all - Clear all iommu tlb entries
* @obj: target iommu
**/
static void flush_iotlb_all(struct omap_iommu *obj)
{
struct iotlb_lock l;
pm_runtime_get_sync(obj->dev);
l.base = 0;
l.vict = 0;
iotlb_lock_set(obj, &l);
iommu_write_reg(obj, 1, MMU_GFLUSH);
pm_runtime_put_sync(obj->dev);
}
/*
* H/W pagetable operations
*/
/*
 * Clean the D-cache lines covering first..last (inclusive) so the IOMMU's
 * hardware walker sees up-to-date first-level descriptors.
 * NOTE(review): the mcr is ARM "clean D-cache line by MVA"; the loop steps
 * one L1 cache line per iteration, so callers must pass pointers that lie
 * within the table being flushed.
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
/*
 * Same as flush_iopgd_range() but for second-level (PTE) tables: clean the
 * D-cache lines from first to last (inclusive) for the hardware walker.
 */
static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
/* Return a second-level page table to the slab cache; NULL is a no-op. */
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	if (!iopte)
		return;

	kmem_cache_free(iopte_cachep, iopte);
}
/*
 * iopte_alloc - return the PTE slot for @da, allocating the second-level
 * table if the pgd entry is still empty.
 *
 * Called with obj->page_table_lock held; the lock is dropped around the
 * sleeping slab allocation and re-taken, so the "did someone else install
 * a table meanwhile" race is re-checked afterwards.
 *
 * Returns a pointer into the L2 table, or ERR_PTR(-ENOMEM).
 */
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		/* publish the new table in the first-level descriptor */
		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the reduniovant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
/* Install a 1MB "section" mapping directly in the first-level table. */
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	/* both addresses must be 1MB aligned */
	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}
/*
 * Install a 16MB "supersection" mapping: ARM requires the descriptor to be
 * replicated across 16 consecutive first-level slots.
 */
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	/* both addresses must be 16MB aligned */
	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
/* Install a 4KB "small page" mapping in the second-level table. */
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
/*
 * Install a 64KB "large page" mapping: the descriptor is replicated across
 * 16 consecutive second-level slots.
 *
 * Fix: validate alignment *before* calling iopte_alloc().  The original
 * allocated (and possibly installed) a fresh L2 table even when the
 * request was misaligned and about to be rejected.
 */
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte;
	int i;

	/* both addresses must be 64KB aligned */
	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	iopte = iopte_alloc(obj, iopgd, da);
	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
/*
 * Dispatch a page-table store to the handler matching the entry's page
 * size (16M super / 1M section / 64K large / 4K small), holding the
 * page-table lock across the update.
 */
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 *
 * Flushes any stale TLB entry for the address first, writes the page
 * table, then (when enabled) pre-loads the new entry into the TLB.
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 *
 * *ppte is NULL when the pgd slot is empty or holds a (super)section.
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *pgd = iopgd_offset(obj, da);
	u32 *pte = NULL;

	/* descend into the second level only for a populated table entry */
	if (*pgd && iopgd_is_table(*pgd))
		pte = iopte_offset(pgd, da);

	*ppgd = pgd;
	*ppte = pte;
}
/*
 * Tear down the mapping covering @da and return the number of bytes
 * unmapped (0 if nothing was mapped).  Frees the L2 table when its last
 * entry goes away.  Caller holds obj->page_table_lock.
 *
 * Fix: the flush_iop{te,gd}_range() end pointers were computed as
 * "base + (nent - 1) * sizeof(*base)".  Pointer arithmetic on u32 *
 * already scales by sizeof(u32), so that flushed 4x too many cache
 * lines, running past the end of the table.  The correct last-entry
 * pointer is simply "base + nent - 1".
 */
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Returns the number of bytes unmapped.  Also flushes the matching TLB
 * entry so the hardware cannot keep using the stale translation.
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
/*
 * Drop every mapping in the page table: free all second-level tables,
 * zero all first-level descriptors, then globally flush the TLB.
 */
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
* Device IOMMU generic operations
*/
/*
 * MMU fault interrupt handler: report the fault to the IOMMU core; if no
 * one handles it (no dynamic PTE loading), disable the MMU and dump the
 * offending page-table entries.
 *
 * Fix: the first dev_err() printed "*pgd:px%08x" — a typo for "0x%08x".
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->iommu_dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
/* driver_find_device() match callback: compare the iommu name to @data. */
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *alias = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, alias);

	return !strcmp(obj->name, alias);
}
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name: name of target omap iommu device
 * @iopgd: page table
 *
 * Looks up the device by alias, enables the MMU with @iopgd as its page
 * table and flushes the TLB.  Returns the omap_iommu or an ERR_PTR.
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_enable:
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 *
 * Disables the MMU and drops the page-table reference.  Tolerates NULL
 * and ERR_PTR values so callers can pass a failed attach result.
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	iommu_disable(obj);
	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
/*
 * DRA7 DSP IOMMUs need a syscon handle plus an instance id from the
 * "ti,syscon-mmuconfig" property; other compatibles skip this entirely.
 * Returns 0 on success or when not applicable; may return -EPROBE_DEFER.
 */
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		return PTR_ERR(obj->syscfg);
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *of = pdev->dev.of_node;

	/* obj and its saved-context area are allocated in one chunk */
	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	if (of) {
		obj->name = dev_name(&pdev->dev);
		/* default to 32 TLB entries; DT may override with 8 */
		obj->nr_tlb_entries = 32;
		err = of_property_read_u32(of, "ti,#tlb-entries",
					   &obj->nr_tlb_entries);
		if (err && err != -EINVAL)
			return err;
		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
			return -EINVAL;
		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
	} else {
		/* legacy (non-DT) boards supply everything via pdata */
		obj->nr_tlb_entries = pdata->nr_tlb_entries;
		obj->name = pdata->name;
	}

	obj->dev = &pdev->dev;
	/* context save area lives right after the struct (see kzalloc above) */
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;
}
/* Undo probe: remove debugfs entries and disable runtime PM. */
static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}
/* Device-tree compatibles handled by this driver. */
static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};
/* Platform driver; also referenced by omap_iommu_attach() for lookups. */
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};
/*
 * Slab constructor for L2 page tables: clean the D-cache over the fresh
 * (zeroed) table so the hardware walker never sees stale lines.
 */
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
/*
 * Fill @e with the standard attributes for a mapping of @da -> @pa at
 * page size @pgsz; returns the mapping size in bytes.
 */
static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	/* compound literal zero-fills every field not named, like memset did */
	*e = (struct iotlb_entry){
		.da	= da,
		.pa	= pa,
		.valid	= MMU_CAM_V,
		.pgsz	= pgsz,
		.endian	= MMU_RAM_ENDIAN_LITTLE,
		.elsz	= MMU_RAM_ELSZ_8,
		.mixed	= 0,
	};

	return iopgsz_to_bytes(e->pgsz);
}
/*
 * iommu_ops .map callback: translate the request into an iotlb_entry and
 * store it in the page table.
 *
 * Fixes: @bytes is size_t but was printed with %d/%x; and the negative
 * errno from omap_iopgtable_store_entry() was stored in a u32 before
 * being returned as int — use a plain int.
 */
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}
/*
 * iommu_ops .unmap callback: clear whatever entry covers @da and return
 * how many bytes were actually unmapped (the entry's own size, not @size).
 *
 * Fix: @size is size_t but was printed with %u — use %zu.
 */
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}
/*
 * iommu_ops .attach_dev callback: bind @dev's iommu (found via archdata)
 * to this domain.  Only one device per domain is supported.
 */
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	if (!arch_data || !arch_data->name) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	/* cross-link domain, device and iommu */
	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
/*
 * Detach helper shared by .detach_dev and domain_free.  Caller holds
 * omap_domain->lock (or is tearing the domain down).
 */
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	/* drop all mappings before disabling the MMU */
	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
	oiommu->domain = NULL;
}
/* iommu_ops .detach_dev callback: locked wrapper around the helper. */
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *od = to_omap_domain(domain);

	spin_lock(&od->lock);
	_omap_iommu_detach_dev(od, dev);
	spin_unlock(&od->lock);
}
/*
 * iommu_ops .domain_alloc callback: allocate a domain plus its first-level
 * page table.  Only unmanaged domains are supported.  Returns NULL on any
 * failure (the iommu core expects NULL, not ERR_PTR).
 */
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		goto out;

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable)
		goto fail_nomem;

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
		goto fail_align;

	/* make the zeroed table visible to the hardware walker */
	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;

fail_align:
	kfree(omap_domain->pgtable);
fail_nomem:
	kfree(omap_domain);
out:
	return NULL;
}
/* iommu_ops .domain_free callback: detach any device, then free memory. */
static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
/*
 * iommu_ops .iova_to_phys callback: walk the page table for @da and
 * translate through whichever descriptor type (small/large page, section,
 * supersection) is found.  Returns 0 for unmapped/bogus entries.
 */
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		/* second-level entry: small (4K) or large (64K) page */
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		/* first-level entry: section (1M) or supersection (16M) */
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
/*
 * iommu_ops .add_device callback: for DT devices with an "iommus"
 * phandle, allocate archdata holding the iommu's name.
 *
 * Fix: the kstrdup() result was never checked, so an OOM here left
 * arch_data->name NULL and crashed later in attach.
 */
static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data;
	struct device_node *np;
	struct platform_device *pdev;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np)
		return 0;

	pdev = of_find_device_by_node(np);
	if (WARN_ON(!pdev)) {
		of_node_put(np);
		return -EINVAL;
	}

	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data) {
		of_node_put(np);
		return -ENOMEM;
	}

	arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
	if (!arch_data->name) {
		kfree(arch_data);
		of_node_put(np);
		return -ENOMEM;
	}
	dev->archdata.iommu = arch_data;

	of_node_put(np);

	return 0;
}
/* iommu_ops .remove_device callback: free archdata added at add_device. */
static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	kfree(arch_data->name);
	kfree(arch_data);
}
/* IOMMU core callbacks implemented by this driver. */
static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.add_device	= omap_iommu_add_device,
	.remove_device	= omap_iommu_remove_device,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
/*
 * Module init: bail quietly when no matching DT node exists, otherwise
 * create the L2 page-table slab cache, register with the platform bus's
 * IOMMU layer and register the platform driver.
 */
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignement */
	struct device_node *np;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	omap_iommu_debugfs_init();

	return platform_driver_register(&omap_iommu_driver);
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */
/* Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2003 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
*
* Developed with the assistance of:
* (c) 2000 David L. Brown, Jr. (usb-storage@davidb.org)
* (c) 2003-2009 Alan Stern (stern@rowland.harvard.edu)
*
* Initial work by:
* (c) 1999 Michael Gee (michael@linuxspecific.com)
*
* usb_device_id support by Adam J. Richter (adam@yggdrasil.com):
* (c) 2000 Yggdrasil Computing, Inc.
*
* This driver is based on the 'USB Mass Storage Class' document. This
* describes in detail the protocol used to communicate with such
* devices. Clearly, the designers had SCSI and ATAPI commands in
* mind when they created this document. The commands are all very
* similar to commands in the SCSI-II and ATAPI specifications.
*
* It is important to note that in a number of cases this class
* exhibits class-specific exemptions from the USB specification.
* Notably the usage of NAK, STALL and ACK differs from the norm, in
* that they are used to communicate wait, failed and OK on commands.
*
* Also, for certain devices, the interrupt endpoint is used to convey
* status of a command.
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef CONFIG_USB_STORAGE_DEBUG
#define DEBUG
#endif
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "scsiglue.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "initializers.h"
#include "sierra_ms.h"
#include "option_ms.h"
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
MODULE_LICENSE("GPL");
static unsigned int delay_use = 1;
module_param(delay_use, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
static char quirks[128];
module_param_string(quirks, quirks, sizeof(quirks), S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
/*
* The entries in this table correspond, line for line,
* with the entries in usb_storage_usb_ids[], defined in usual-tables.c.
*/
/* The vendor name should be kept at eight characters or less, and
* the product name should be kept at 16 characters or less. If a device
* has the US_FL_FIX_INQUIRY flag, then the vendor and product names
* normally generated by a device thorugh the INQUIRY response will be
* taken from this list, and this is the reason for the above size
* restriction. However, if the flag is not present, then you
* are free to use as many characters as you like.
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
#define COMPLIANT_DEV UNUSUAL_DEV
#define USUAL_DEV(use_protocol, use_transport) \
{ \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
}
#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev us_unusual_dev_list[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
};
static struct us_unusual_dev for_dynamic_ids =
USUAL_DEV(USB_SC_SCSI, USB_PR_BULK);
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
#undef UNUSUAL_VENDOR_INTF
#ifdef CONFIG_LOCKDEP

static struct lock_class_key us_interface_key[USB_MAXINTERFACES];

/*
 * Give each interface's dev_mutex its own lockdep class (keyed by the
 * interface's index within the active configuration) so lockdep does not
 * report false deadlocks between independent storage interfaces.
 */
static void us_set_lock_class(struct mutex *mutex,
		struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_host_config *config = udev->actconfig;
	int i;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		if (config->interface[i] == intf)
			break;
	}

	/* the interface must belong to the active configuration */
	BUG_ON(i == config->desc.bNumInterfaces);

	lockdep_set_class(mutex, &us_interface_key[i]);
}

#else

/* No-op when lockdep is disabled. */
static void us_set_lock_class(struct mutex *mutex,
		struct usb_interface *intf)
{
}

#endif
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
/*
 * Suspend handler: waits for any in-flight SCSI command to finish (by
 * taking dev_mutex) and notifies the subdriver hook, if any.
 */
int usb_stor_suspend(struct usb_interface *iface, pm_message_t message)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Wait until no command is running */
	mutex_lock(&us->dev_mutex);

	if (us->suspend_resume_hook)
		(us->suspend_resume_hook)(us, US_SUSPEND);

	/* When runtime PM is working, we'll set a flag to indicate
	 * whether we should autoresume when a SCSI request arrives. */

	mutex_unlock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_suspend);
/* Resume handler: mirror of usb_stor_suspend(), notifies the subdriver. */
int usb_stor_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	mutex_lock(&us->dev_mutex);

	if (us->suspend_resume_hook)
		(us->suspend_resume_hook)(us, US_RESUME);

	mutex_unlock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_resume);
/*
 * Reset-resume handler: the device was reset while suspended, so tell the
 * SCSI midlayer the bus was reset.
 */
int usb_stor_reset_resume(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Report the reset to the SCSI core */
	usb_stor_report_bus_reset(us);

	/* FIXME: Notify the subdrivers that they need to reinitialize
	 * the device */

	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_reset_resume);
#endif /* CONFIG_PM */
/*
* The next two routines get called just before and just after
* a USB port reset, whether from this driver or a different one.
*/
/*
 * Called just before a USB port reset.  Deliberately returns with
 * dev_mutex HELD; usb_stor_post_reset() releases it, so no SCSI command
 * can run across the reset.
 */
int usb_stor_pre_reset(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Make sure no command runs during the reset */
	mutex_lock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_pre_reset);
/*
 * Called just after a USB port reset.  Reports the reset to the SCSI core
 * and releases the dev_mutex taken by usb_stor_pre_reset().
 */
int usb_stor_post_reset(struct usb_interface *iface)
{
	struct us_data *us = usb_get_intfdata(iface);

	/* Report the reset to the SCSI core */
	usb_stor_report_bus_reset(us);

	/* FIXME: Notify the subdrivers that they need to reinitialize
	 * the device */

	mutex_unlock(&us->dev_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_post_reset);
/*
 * fill_inquiry_response takes an unsigned char array (which must
 * be at least 36 characters) and populates the vendor name,
 * product name, and revision fields. Then the array is copied
 * into the SCSI command's response buffer (oddly enough
 * called request_buffer). data_len contains the length of the
 * data array, which again must be at least 36.
 */
void fill_inquiry_response(struct us_data *us, unsigned char *data,
		unsigned int data_len)
{
	if (data_len < 36) /* You lose. */
		return;

	/* space-fill vendor/product/revision (bytes 8..35) */
	memset(data+8, ' ', 28);

	if (data[0]&0x20) { /* USB device currently not connected. Return
			      peripheral qualifier 001b ("...however, the
			      physical device is not currently connected
			      to this logical unit") and leave vendor and
			      product identification empty. ("If the target
			      does store some of the INQUIRY data on the
			      device, it may return zeros or ASCII spaces
			      (20h) in those fields until the data is
			      available from the device."). */
	} else {
		u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
		int n;

		n = strlen(us->unusual_dev->vendorName);
		memcpy(data+8, us->unusual_dev->vendorName, min(8, n));
		n = strlen(us->unusual_dev->productName);
		memcpy(data+16, us->unusual_dev->productName, min(16, n));

		/* revision: the four BCD digits of bcdDevice as ASCII */
		data[32] = 0x30 + ((bcdDevice>>12) & 0x0F);
		data[33] = 0x30 + ((bcdDevice>>8) & 0x0F);
		data[34] = 0x30 + ((bcdDevice>>4) & 0x0F);
		data[35] = 0x30 + ((bcdDevice) & 0x0F);
	}

	usb_stor_set_xfer_buf(data, data_len, us->srb);
}
EXPORT_SYMBOL_GPL(fill_inquiry_response);
/*
 * Per-device kernel thread that executes queued SCSI commands.  Woken via
 * us->cmnd_ready; exits when woken with us->srb == NULL.  The lock order
 * (dev_mutex outside, host lock inside) and the abort/TIMED_OUT handling
 * at the bottom are order-sensitive — do not reshuffle.
 */
static int usb_stor_control_thread(void * __us)
{
	struct us_data *us = (struct us_data *)__us;
	struct Scsi_Host *host = us_to_host(us);

	for (;;) {
		usb_stor_dbg(us, "*** thread sleeping\n");
		if (wait_for_completion_interruptible(&us->cmnd_ready))
			break;

		usb_stor_dbg(us, "*** thread awakened\n");

		/* lock the device pointers */
		mutex_lock(&(us->dev_mutex));

		/* lock access to the state */
		scsi_lock(host);

		/* When we are called with no command pending, we're done */
		if (us->srb == NULL) {
			scsi_unlock(host);
			mutex_unlock(&us->dev_mutex);
			usb_stor_dbg(us, "-- exiting\n");
			break;
		}

		/* has the command timed out *already* ? */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			us->srb->result = DID_ABORT << 16;
			goto SkipForAbort;
		}

		scsi_unlock(host);

		/* reject the command if the direction indicator
		 * is UNKNOWN
		 */
		if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
			usb_stor_dbg(us, "UNKNOWN data direction\n");
			us->srb->result = DID_ERROR << 16;
		}

		/* reject if target != 0 or if LUN is higher than
		 * the maximum known LUN
		 */
		else if (us->srb->device->id &&
				!(us->fflags & US_FL_SCM_MULT_TARG)) {
			usb_stor_dbg(us, "Bad target number (%d:%d)\n",
				     us->srb->device->id,
				     us->srb->device->lun);
			us->srb->result = DID_BAD_TARGET << 16;
		}

		else if (us->srb->device->lun > us->max_lun) {
			usb_stor_dbg(us, "Bad LUN (%d:%d)\n",
				     us->srb->device->id,
				     us->srb->device->lun);
			us->srb->result = DID_BAD_TARGET << 16;
		}

		/* Handle those devices which need us to fake
		 * their inquiry data */
		else if ((us->srb->cmnd[0] == INQUIRY) &&
			    (us->fflags & US_FL_FIX_INQUIRY)) {
			/* canned INQUIRY header; rest filled in below */
			unsigned char data_ptr[36] = {
			    0x00, 0x80, 0x02, 0x02,
			    0x1F, 0x00, 0x00, 0x00};

			usb_stor_dbg(us, "Faking INQUIRY command\n");
			fill_inquiry_response(us, data_ptr, 36);
			us->srb->result = SAM_STAT_GOOD;
		}

		/* we've got a command, let's do it! */
		else {
			US_DEBUG(usb_stor_show_command(us, us->srb));
			us->proto_handler(us->srb, us);
			usb_mark_last_busy(us->pusb_dev);
		}

		/* lock access to the state */
		scsi_lock(host);

		/* indicate that the command is done */
		if (us->srb->result != DID_ABORT << 16) {
			usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
				     us->srb->result);
			us->srb->scsi_done(us->srb);
		} else {
SkipForAbort:
			usb_stor_dbg(us, "scsi command aborted\n");
		}

		/* If an abort request was received we need to signal that
		 * the abort has finished. The proper test for this is
		 * the TIMED_OUT flag, not srb->result == DID_ABORT, because
		 * the timeout might have occurred after the command had
		 * already completed with a different result code. */
		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			complete(&(us->notify));

			/* Allow USB transfers to resume */
			clear_bit(US_FLIDX_ABORTING, &us->dflags);
			clear_bit(US_FLIDX_TIMED_OUT, &us->dflags);
		}

		/* finished working on this command */
		us->srb = NULL;
		scsi_unlock(host);

		/* unlock the device pointers */
		mutex_unlock(&us->dev_mutex);
	} /* for (;;) */

	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
/***********************************************************************
* Device probing and disconnecting
***********************************************************************/
/*
 * Associate our private us_data with the USB device/interface and
 * allocate the USB transfer buffers.
 *
 * Returns 0 on success or -ENOMEM.  On failure the partially filled
 * structure is cleaned up by dissociate_dev() (which frees us->cr and
 * the coherent I/O buffer), so nothing is freed here.
 */
static int associate_dev(struct us_data *us, struct usb_interface *intf)
{
	/* Fill in the device-related fields */
	us->pusb_dev = interface_to_usbdev(intf);
	us->pusb_intf = intf;
	us->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	usb_stor_dbg(us, "Vendor: 0x%04x, Product: 0x%04x, Revision: 0x%04x\n",
		     le16_to_cpu(us->pusb_dev->descriptor.idVendor),
		     le16_to_cpu(us->pusb_dev->descriptor.idProduct),
		     le16_to_cpu(us->pusb_dev->descriptor.bcdDevice));
	usb_stor_dbg(us, "Interface Subclass: 0x%02x, Protocol: 0x%02x\n",
		     intf->cur_altsetting->desc.bInterfaceSubClass,
		     intf->cur_altsetting->desc.bInterfaceProtocol);
	/* Store our private data in the interface */
	usb_set_intfdata(intf, us);
	/* Allocate the control/setup and DMA-mapped buffers */
	us->cr = kmalloc(sizeof(*us->cr), GFP_KERNEL);
	if (!us->cr)
		return -ENOMEM;
	/* iobuf is DMA-coherent; iobuf_dma holds its bus address for URBs */
	us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE,
				       GFP_KERNEL, &us->iobuf_dma);
	if (!us->iobuf) {
		usb_stor_dbg(us, "I/O buffer allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}
/* Works only for digits and letters, but small and fast */
#define TOLOWER(x) ((x) | 0x20)
/* Adjust device flags based on the "quirks=" module parameter */
static void adjust_quirks(struct us_data *us)
{
char *p;
u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
unsigned f = 0;
unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
US_FL_FIX_CAPACITY |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE);
p = quirks;
while (*p) {
/* Each entry consists of VID:PID:flags */
if (vid == simple_strtoul(p, &p, 16) &&
*p == ':' &&
pid == simple_strtoul(p+1, &p, 16) &&
*p == ':')
break;
/* Move forward to the next entry */
while (*p) {
if (*p++ == ',')
break;
}
}
if (!*p) /* No match */
return;
/* Collect the flags */
while (*++p && *p != ',') {
switch (TOLOWER(*p)) {
case 'a':
f |= US_FL_SANE_SENSE;
break;
case 'b':
f |= US_FL_BAD_SENSE;
break;
case 'c':
f |= US_FL_FIX_CAPACITY;
break;
case 'd':
f |= US_FL_NO_READ_DISC_INFO;
break;
case 'e':
f |= US_FL_NO_READ_CAPACITY_16;
break;
case 'h':
f |= US_FL_CAPACITY_HEURISTICS;
break;
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
case 'm':
f |= US_FL_MAX_SECTORS_64;
break;
case 'n':
f |= US_FL_INITIAL_READ10;
break;
case 'o':
f |= US_FL_CAPACITY_OK;
break;
case 'p':
f |= US_FL_WRITE_CACHE;
break;
case 'r':
f |= US_FL_IGNORE_RESIDUE;
break;
case 's':
f |= US_FL_SINGLE_LUN;
break;
case 'w':
f |= US_FL_NO_WP_DETECT;
break;
/* Ignore unrecognized flag characters */
}
}
us->fflags = (us->fflags & ~mask) | f;
}
/*
 * Get the unusual_devs entries and the string descriptors.
 *
 * Fills in us->subclass, us->protocol and us->fflags from the
 * unusual_devs entry (or the interface descriptor when the entry says
 * USB_SC_DEVICE/USB_PR_DEVICE) and the "quirks=" module parameter.
 * Returns -ENODEV if the device is flagged US_FL_IGNORE_DEVICE.
 */
static int get_device_info(struct us_data *us, const struct usb_device_id *id,
			   struct us_unusual_dev *unusual_dev)
{
	struct usb_device *dev = us->pusb_dev;
	struct usb_interface_descriptor *idesc =
		&us->pusb_intf->cur_altsetting->desc;
	struct device *pdev = &us->pusb_intf->dev;
	/* Store the entries */
	us->unusual_dev = unusual_dev;
	/* USB_SC_DEVICE / USB_PR_DEVICE mean "use the descriptor values" */
	us->subclass = (unusual_dev->useProtocol == USB_SC_DEVICE) ?
		idesc->bInterfaceSubClass :
		unusual_dev->useProtocol;
	us->protocol = (unusual_dev->useTransport == USB_PR_DEVICE) ?
		idesc->bInterfaceProtocol :
		unusual_dev->useTransport;
	us->fflags = id->driver_info;
	/* The "quirks=" module parameter may override the static flags */
	adjust_quirks(us);
	if (us->fflags & US_FL_IGNORE_DEVICE) {
		dev_info(pdev, "device ignored\n");
		return -ENODEV;
	}
	/*
	 * This flag is only needed when we're in high-speed, so let's
	 * disable it if we're in full-speed
	 */
	if (dev->speed != USB_SPEED_HIGH)
		us->fflags &= ~US_FL_GO_SLOW;
	if (us->fflags)
		dev_info(pdev, "Quirks match for vid %04x pid %04x: %lx\n",
			 le16_to_cpu(dev->descriptor.idVendor),
			 le16_to_cpu(dev->descriptor.idProduct),
			 us->fflags);
	/* Log a message if a non-generic unusual_dev entry contains an
	 * unnecessary subclass or protocol override. This may stimulate
	 * reports from users that will help us remove unneeded entries
	 * from the unusual_devs.h table.
	 */
	if (id->idVendor || id->idProduct) {
		static const char *msgs[3] = {
			"an unneeded SubClass entry",
			"an unneeded Protocol entry",
			"unneeded SubClass and Protocol entries"};
		struct usb_device_descriptor *ddesc = &dev->descriptor;
		/* msg indexes msgs[]: 0 = SubClass, 1 = Protocol, 2 = both */
		int msg = -1;
		if (unusual_dev->useProtocol != USB_SC_DEVICE &&
		    us->subclass == idesc->bInterfaceSubClass)
			msg += 1;
		if (unusual_dev->useTransport != USB_PR_DEVICE &&
		    us->protocol == idesc->bInterfaceProtocol)
			msg += 2;
		if (msg >= 0 && !(us->fflags & US_FL_NEED_OVERRIDE))
			dev_notice(pdev, "This device "
				   "(%04x,%04x,%04x S %02x P %02x)"
				   " has %s in unusual_devs.h (kernel"
				   " %s)\n"
				   " Please send a copy of this message to "
				   "<linux-usb@vger.kernel.org> and "
				   "<usb-storage@lists.one-eyed-alien.net>\n",
				   le16_to_cpu(ddesc->idVendor),
				   le16_to_cpu(ddesc->idProduct),
				   le16_to_cpu(ddesc->bcdDevice),
				   idesc->bInterfaceSubClass,
				   idesc->bInterfaceProtocol,
				   msgs[msg],
				   utsname()->release);
	}
	return 0;
}
/*
 * Select the transport routines, reset routine, and default max LUN
 * from the protocol value chosen by get_device_info().
 */
static void get_transport(struct us_data *us)
{
	switch (us->protocol) {
	case USB_PR_CB:
	case USB_PR_CBI:
		/* CB and CBI share the same transport/reset routines */
		us->transport_name = (us->protocol == USB_PR_CB) ?
				"Control/Bulk" : "Control/Bulk/Interrupt";
		us->transport = usb_stor_CB_transport;
		us->transport_reset = usb_stor_CB_reset;
		us->max_lun = 7;
		break;
	case USB_PR_BULK:
		/* max_lun is determined later by usb_stor_Bulk_max_lun() */
		us->transport_name = "Bulk";
		us->transport = usb_stor_Bulk_transport;
		us->transport_reset = usb_stor_Bulk_reset;
		break;
	}
}
/*
 * Select the SCSI command-set handler from the subclass value chosen
 * by get_device_info().
 */
static void get_protocol(struct us_data *us)
{
	switch (us->subclass) {
	case USB_SC_RBC:
		us->protocol_name = "Reduced Block Commands (RBC)";
		us->proto_handler = usb_stor_transparent_scsi_command;
		break;
	case USB_SC_SCSI:
		us->protocol_name = "Transparent SCSI";
		us->proto_handler = usb_stor_transparent_scsi_command;
		break;
	case USB_SC_UFI:
		us->protocol_name = "Uniform Floppy Interface (UFI)";
		us->proto_handler = usb_stor_ufi_command;
		break;
	case USB_SC_8020:
	case USB_SC_QIC:
	case USB_SC_8070:
		/* ATAPI-style sets: commands padded to 12 bytes, one LUN */
		us->protocol_name = (us->subclass == USB_SC_8020) ? "8020i" :
				(us->subclass == USB_SC_QIC) ? "QIC-157" :
				"8070i";
		us->proto_handler = usb_stor_pad12_command;
		us->max_lun = 0;
		break;
	}
}
/*
 * Find the endpoints the driver needs and compute the pipe values.
 *
 * Requires a bulk-in and a bulk-out endpoint; an interrupt-in endpoint
 * is optional except for the CBI protocol, where it is mandatory.
 * Returns 0 on success or -EIO if a required endpoint is missing.
 */
static int get_pipes(struct us_data *us)
{
	struct usb_host_interface *alt = us->pusb_intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in = NULL;
	struct usb_endpoint_descriptor *ep_out = NULL;
	struct usb_endpoint_descriptor *ep_int = NULL;
	int i;

	/*
	 * Take the first endpoint of each kind and ignore any others.
	 */
	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

		if (usb_endpoint_xfer_bulk(ep)) {
			if (usb_endpoint_dir_in(ep)) {
				if (!ep_in)
					ep_in = ep;
			} else if (!ep_out) {
				ep_out = ep;
			}
		} else if (usb_endpoint_is_int_in(ep) && !ep_int) {
			ep_int = ep;
		}
	}
	if (!ep_in || !ep_out || (us->protocol == USB_PR_CBI && !ep_int)) {
		usb_stor_dbg(us, "Endpoint sanity check failed! Rejecting dev.\n");
		return -EIO;
	}
	/* Calculate and store the pipe values */
	us->send_ctrl_pipe = usb_sndctrlpipe(us->pusb_dev, 0);
	us->recv_ctrl_pipe = usb_rcvctrlpipe(us->pusb_dev, 0);
	us->send_bulk_pipe = usb_sndbulkpipe(us->pusb_dev,
					     usb_endpoint_num(ep_out));
	us->recv_bulk_pipe = usb_rcvbulkpipe(us->pusb_dev,
					     usb_endpoint_num(ep_in));
	if (ep_int) {
		us->recv_intr_pipe = usb_rcvintpipe(us->pusb_dev,
						    usb_endpoint_num(ep_int));
		us->ep_bInterval = ep_int->bInterval;
	}
	return 0;
}
/*
 * Initialize the SCSI-device auto-suspend timeout.
 *
 * Some USB UICC devices expose a mass-storage interface alongside a
 * CCID (smart-card) interface; such cards stay inserted all the time,
 * so enable SCSI auto-suspend for them.
 */
static void usb_stor_set_scsi_autosuspend(struct us_data *us)
{
	struct usb_host_config *config = us->pusb_dev->actconfig;
	int i;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_host_interface *alt =
			config->interface[i]->cur_altsetting;

		if (alt->desc.bInterfaceClass == USB_CLASS_CSCID) {
			us->sdev_autosuspend_delay = 2000;	/* msec */
			return;
		}
	}
}
/*
 * Initialize all the dynamic resources we need: the working URB, any
 * device-specific initialization, and the control thread.
 *
 * Returns 0 or a negative errno.  On failure, resources allocated so
 * far are released by usb_stor_release_resources() via the caller's
 * error path.
 */
static int usb_stor_acquire_resources(struct us_data *us)
{
	int p;
	struct task_struct *th;
	us->current_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!us->current_urb) {
		usb_stor_dbg(us, "URB allocation failed\n");
		return -ENOMEM;
	}
	/* Just before we start our control thread, initialize
	 * the device if it needs initialization */
	if (us->unusual_dev->initFunction) {
		p = us->unusual_dev->initFunction(us);
		if (p)
			return p;
	}
	/* Start up our control thread */
	th = kthread_run(usb_stor_control_thread, us, "usb-storage");
	if (IS_ERR(th)) {
		dev_warn(&us->pusb_intf->dev,
				"Unable to start control thread\n");
		return PTR_ERR(th);
	}
	us->ctl_thread = th;
	return 0;
}
/*
 * Release all our dynamic resources: stop the control thread, run the
 * subdriver's destructor, and free the extra data and the URB.
 */
static void usb_stor_release_resources(struct us_data *us)
{
	/* Tell the control thread to exit. The SCSI host must
	 * already have been removed and the DISCONNECTING flag set
	 * so that we won't accept any more commands.
	 */
	usb_stor_dbg(us, "-- sending exit command to thread\n");
	complete(&us->cmnd_ready);
	/* ctl_thread may be NULL if acquire_resources failed early */
	if (us->ctl_thread)
		kthread_stop(us->ctl_thread);
	/* Call the destructor routine, if it exists */
	if (us->extra_destructor) {
		usb_stor_dbg(us, "-- calling extra_destructor()\n");
		us->extra_destructor(us->extra);
	}
	/* Free the extra data and the URB */
	kfree(us->extra);
	usb_free_urb(us->current_urb);
}
/*
 * Dissociate from the USB device: detach our private data from the
 * interface and release the buffers allocated by associate_dev().
 */
static void dissociate_dev(struct us_data *us)
{
	/* Remove our private data from the interface */
	usb_set_intfdata(us->pusb_intf, NULL);

	/* Free the control/setup buffer and the DMA-coherent I/O buffer */
	kfree(us->cr);
	usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf,
			  us->iobuf_dma);
}
/* First stage of disconnect processing: stop SCSI scanning,
 * remove the host, and stop accepting new commands
 */
static void quiesce_and_remove_host(struct us_data *us)
{
	struct Scsi_Host *host = us_to_host(us);
	/* If the device is really gone, cut short reset delays */
	if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
		pm_suspend_ignore_children(&us->pusb_intf->dev, true);
		set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
		wake_up(&us->delay_wait);
	}
	/* Prevent SCSI scanning (if it hasn't started yet)
	 * or wait for the SCSI-scanning routine to stop.
	 */
	cancel_delayed_work_sync(&us->scan_dwork);
	/* Balance autopm calls if scanning was cancelled */
	/* (SCAN_PENDING still set means scan_dwork never ran its put) */
	if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
		usb_autopm_put_interface_no_suspend(us->pusb_intf);
	/* Removing the host will perform an orderly shutdown: caches
	 * synchronized, disks spun down, etc.
	 */
	scsi_remove_host(host);
	/* Prevent any new commands from being accepted and cut short
	 * reset delays.
	 */
	/* DISCONNECTING is set under the host lock so queuecommand sees it */
	scsi_lock(host);
	set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
	scsi_unlock(host);
	wake_up(&us->delay_wait);
}
/* Second stage of disconnect processing: deallocate all resources */
static void release_everything(struct us_data *us)
{
	usb_stor_release_resources(us);
	dissociate_dev(us);
	/* Drop our reference to the host; the SCSI core will free it
	 * (and "us" along with it) when the refcount becomes 0. */
	scsi_host_put(us_to_host(us));
}
/* Delayed-work routine to carry out SCSI-device scanning */
static void usb_stor_scan_dwork(struct work_struct *work)
{
	struct us_data *us = container_of(work, struct us_data,
			scan_dwork.work);
	struct device *dev = &us->pusb_intf->dev;
	dev_dbg(dev, "starting scan\n");
	/* For bulk-only devices, determine the max LUN value */
	if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) {
		/* dev_mutex serializes against the control thread's I/O */
		mutex_lock(&us->dev_mutex);
		us->max_lun = usb_stor_Bulk_max_lun(us);
		mutex_unlock(&us->dev_mutex);
	}
	scsi_scan_host(us_to_host(us));
	dev_dbg(dev, "scan complete\n");
	/* Should we unbind if no devices were detected? */
	/* Balances the autopm get taken when the scan was queued */
	usb_autopm_put_interface(us->pusb_intf);
	clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
}
/*
 * Return the host controller's scatter-gather limit, or SG_ALL when
 * the bus does not advertise one.
 */
static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
{
	struct usb_device *usb_dev = interface_to_usbdev(intf);

	return usb_dev->bus->sg_tablesize ?
			usb_dev->bus->sg_tablesize : SG_ALL;
}
/* First part of general USB mass-storage probing.
 *
 * Allocates the Scsi_Host (with us_data in its hostdata), associates
 * it with the USB device, and fills in the standard transport/protocol
 * settings.  On success *pus points at the new us_data and the caller
 * may override transport/protocol before calling usb_stor_probe2().
 * On failure everything is torn down and a negative errno is returned.
 */
int usb_stor_probe1(struct us_data **pus,
		struct usb_interface *intf,
		const struct usb_device_id *id,
		struct us_unusual_dev *unusual_dev)
{
	struct Scsi_Host *host;
	struct us_data *us;
	int result;
	dev_info(&intf->dev, "USB Mass Storage device detected\n");
	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private us_data structure.
	 */
	host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us));
	if (!host) {
		dev_warn(&intf->dev, "Unable to allocate the scsi host\n");
		return -ENOMEM;
	}
	/*
	 * Allow 16-byte CDBs and thus > 2TB
	 */
	host->max_cmd_len = 16;
	host->sg_tablesize = usb_stor_sg_tablesize(intf);
	*pus = us = host_to_us(host);
	mutex_init(&(us->dev_mutex));
	us_set_lock_class(&us->dev_mutex, intf);
	init_completion(&us->cmnd_ready);
	init_completion(&(us->notify));
	init_waitqueue_head(&us->delay_wait);
	INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);
	/* Associate the us_data structure with the USB device */
	result = associate_dev(us, intf);
	if (result)
		goto BadDevice;
	/* Get the unusual_devs entries and the descriptors */
	result = get_device_info(us, id, unusual_dev);
	if (result)
		goto BadDevice;
	/* Get standard transport and protocol settings */
	get_transport(us);
	get_protocol(us);
	/* Give the caller a chance to fill in specialized transport
	 * or protocol settings.
	 */
	return 0;
BadDevice:
	usb_stor_dbg(us, "storage_probe() failed\n");
	release_everything(us);
	return result;
}
EXPORT_SYMBOL_GPL(usb_stor_probe1);
/* Second part of general USB mass-storage probing.
 *
 * Validates the transport/protocol chosen after usb_stor_probe1(),
 * finds the endpoints, starts the control thread, registers the SCSI
 * host, and queues the delayed SCSI scan.  Returns 0 or a negative
 * errno; on failure everything allocated by probe1/probe2 is released.
 */
int usb_stor_probe2(struct us_data *us)
{
	int result;
	struct device *dev = &us->pusb_intf->dev;
	/* Make sure the transport and protocol have both been set */
	if (!us->transport || !us->proto_handler) {
		result = -ENXIO;
		goto BadDevice;
	}
	usb_stor_dbg(us, "Transport: %s\n", us->transport_name);
	usb_stor_dbg(us, "Protocol: %s\n", us->protocol_name);
	/* fix for single-lun devices */
	if (us->fflags & US_FL_SINGLE_LUN)
		us->max_lun = 0;
	if (!(us->fflags & US_FL_SCM_MULT_TARG))
		us_to_host(us)->max_id = 1;
	/* Find the endpoints and calculate pipe values */
	result = get_pipes(us);
	if (result)
		goto BadDevice;
	/*
	 * If the device returns invalid data for the first READ(10)
	 * command, indicate the command should be retried.
	 */
	if (us->fflags & US_FL_INITIAL_READ10)
		set_bit(US_FLIDX_REDO_READ10, &us->dflags);
	/* Acquire all the other resources and add the host */
	result = usb_stor_acquire_resources(us);
	if (result)
		goto BadDevice;
	/* -1 means "no override"; may be changed for CCID combo devices */
	us->sdev_autosuspend_delay = -1;
	usb_stor_set_scsi_autosuspend(us);
	snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
					dev_name(&us->pusb_intf->dev));
	result = scsi_add_host(us_to_host(us), dev);
	if (result) {
		dev_warn(dev,
				"Unable to add the scsi host\n");
		goto BadDevice;
	}
	/* Submit the delayed_work for SCSI-device scanning */
	/* The autopm get here is balanced in usb_stor_scan_dwork() or,
	 * if the scan is cancelled, in quiesce_and_remove_host() */
	usb_autopm_get_interface_no_resume(us->pusb_intf);
	set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
	if (delay_use > 0)
		dev_dbg(dev, "waiting for device to settle before scanning\n");
	queue_delayed_work(system_freezable_wq, &us->scan_dwork,
			delay_use * HZ);
	return 0;
	/* We come here if there are any problems */
BadDevice:
	usb_stor_dbg(us, "storage_probe() failed\n");
	release_everything(us);
	return result;
}
EXPORT_SYMBOL_GPL(usb_stor_probe2);
/* Handle a USB mass-storage disconnect */
void usb_stor_disconnect(struct usb_interface *intf)
{
struct us_data *us = usb_get_intfdata(intf);
quiesce_and_remove_host(us);
release_everything(us);
}
EXPORT_SYMBOL_GPL(usb_stor_disconnect);
/* The main probe routine for standard devices */
static int storage_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_unusual_dev *unusual_dev;
	struct us_data *us;
	int result;
	int size;
	/*
	 * If the device isn't standard (is handled by a subdriver
	 * module) then don't accept it.
	 */
	if (usb_usual_ignore_device(intf))
		return -ENXIO;
	/*
	 * Call the general probe procedures.
	 *
	 * The unusual_dev_list array is parallel to the usb_storage_usb_ids
	 * table, so we use the index of the id entry to find the
	 * corresponding unusual_devs entry.
	 */
	size = ARRAY_SIZE(us_unusual_dev_list);
	/* ids outside the static table come from dynamic id additions */
	if (id >= usb_storage_usb_ids && id < usb_storage_usb_ids + size) {
		unusual_dev = (id - usb_storage_usb_ids) + us_unusual_dev_list;
	} else {
		unusual_dev = &for_dynamic_ids;
		dev_dbg(&intf->dev, "Use Bulk-Only transport with the Transparent SCSI protocol for dynamic id: 0x%04x 0x%04x\n",
			id->idVendor, id->idProduct);
	}
	result = usb_stor_probe1(&us, intf, id, unusual_dev);
	if (result)
		return result;
	/* No special transport or protocol settings in the main module */
	result = usb_stor_probe2(us);
	return result;
}
/* USB driver glue for the generic mass-storage class driver */
static struct usb_driver usb_storage_driver = {
	.name = "usb-storage",
	.probe = storage_probe,
	.disconnect = usb_stor_disconnect,
	.suspend = usb_stor_suspend,
	.resume = usb_stor_resume,
	.reset_resume = usb_stor_reset_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = usb_storage_usb_ids,
	.supports_autosuspend = 1,
	.soft_unbind = 1,	/* disconnect() may run with the device open */
};
module_usb_driver(usb_storage_driver);
| gpl-2.0 |
pritanshchandra/purex_kernel_xolo_black | arch/arm/mach-s3c24xx/mach-smdk2410.c | 2531 | 3309 | /* linux/arch/arm/mach-s3c2410/mach-smdk2410.c
*
* linux/arch/arm/mach-s3c2410/mach-smdk2410.c
*
* Copyright (C) 2004 by FS Forth-Systeme GmbH
* All rights reserved.
*
* @Author: Jonas Dietsche
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
* @History:
* derived from linux/arch/arm/mach-s3c2410/mach-bast.c, written by
* Ben Dooks <ben@simtec.co.uk>
*
***********************************************************************/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/samsung-time.h>
#include "common.h"
#include "common-smdk.h"
static struct map_desc smdk2410_iodesc[] __initdata = {
	/* no board-specific static I/O mappings are needed yet */
};
/*
 * Default UART register settings shared by all three ports below.
 * The OR-expressions are parenthesized so the macros expand safely
 * inside larger expressions (standard macro hygiene).
 */
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
/* Configuration for the three on-chip UARTs, all using the same
 * line settings (UCON/ULCON/UFCON defined above). */
static struct s3c2410_uartcfg smdk2410_uartcfgs[] __initdata = {
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	},
	[1] = {
		.hwport	     = 1,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	},
	[2] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	}
};
/* On-chip peripherals registered for this board at init time */
static struct platform_device *smdk2410_devices[] __initdata = {
	&s3c_device_ohci,
	&s3c_device_lcd,
	&s3c_device_wdt,
	&s3c_device_i2c0,
	&s3c_device_iis,
};
/* Early board setup: static mappings, clocks, UARTs, and the PWM
 * timers used as the system clocksource/clockevent. */
static void __init smdk2410_map_io(void)
{
	s3c24xx_init_io(smdk2410_iodesc, ARRAY_SIZE(smdk2410_iodesc));
	/* 0 = xtal rate not specified here; presumably the platform
	 * code falls back to its default — TODO confirm */
	s3c24xx_init_clocks(0);
	s3c24xx_init_uarts(smdk2410_uartcfgs, ARRAY_SIZE(smdk2410_uartcfgs));
	samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
/* Machine init: register platform devices and common SMDK peripherals */
static void __init smdk2410_init(void)
{
	/* NULL = use the default i2c0 platform data */
	s3c_i2c0_set_platdata(NULL);
	platform_add_devices(smdk2410_devices, ARRAY_SIZE(smdk2410_devices));
	smdk_machine_init();
}
/* Machine descriptor consumed by the ARM boot code */
MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch
				    * to SMDK2410 */
	/* Maintainer: Jonas Dietsche */
	.atag_offset	= 0x100,
	.map_io		= smdk2410_map_io,
	.init_irq	= s3c2410_init_irq,
	.init_machine	= smdk2410_init,
	.init_time	= samsung_timer_init,
	.restart	= s3c2410_restart,
MACHINE_END
| gpl-2.0 |
evan6200/cirrfy_pos | drivers/staging/comedi/drivers/serial2002.c | 2531 | 20624 | /*
comedi/drivers/serial2002.c
Skeleton code for a Comedi driver
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2002 Anders Blomdell <anders.blomdell@control.lth.se>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: serial2002
Description: Driver for serial connected hardware
Devices:
Author: Anders Blomdell
Updated: Fri, 7 Jun 2002 12:56:45 -0700
Status: in development
*/
#include "../comedidev.h"
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/termios.h>
#include <asm/ioctls.h>
#include <linux/serial.h>
#include <linux/poll.h>
/*
* Board descriptions for two imaginary boards. Describing the
* boards in this way is optional, and completely driver-dependent.
* Some drivers use arrays such as this, other do not.
*/
/* Minimal board description; this driver supports one board type,
 * matched by name via the comedi core. */
struct serial2002_board {
	const char *name;
};
static const struct serial2002_board serial2002_boards[] = {
	{
	 .name = "serial2002"}
};
/*
* Useful for shorthand access to the particular board structure
*/
#define thisboard ((const struct serial2002_board *)dev->board_ptr)
/* Wrapper so a comedi_krange can be cast to a comedi_lrange with
 * length 1 (see serial_2002_open). */
struct serial2002_range_table_t {
	/*  HACK... */
	int length;
	struct comedi_krange range;
};
/* Per-device state for one open serial2002 dongle */
struct serial2002_private {
	int port;		/*  /dev/ttyS<port> */
	int speed;		/*  baudrate */
	struct file *tty;	/* open tty, or ERR_PTR/NULL when closed */
	unsigned int ao_readback[32];	/* last values written to AO channels */
	/* per-subdevice maps from comedi channel index to device channel */
	unsigned char digital_in_mapping[32];
	unsigned char digital_out_mapping[32];
	unsigned char analog_in_mapping[32];
	unsigned char analog_out_mapping[32];
	unsigned char encoder_in_mapping[32];
	struct serial2002_range_table_t in_range[32], out_range[32];
};
/*
* most drivers define the following macro to make it easy to
* access the private structure.
*/
#define devpriv ((struct serial2002_private *)dev->private)
static int serial2002_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int serial2002_detach(struct comedi_device *dev);
/* Comedi driver registration record */
struct comedi_driver driver_serial2002 = {
	.driver_name = "serial2002",
	.module = THIS_MODULE,
	.attach = serial2002_attach,
	.detach = serial2002_detach,
	.board_name = &serial2002_boards[0].name,
	.offset = sizeof(struct serial2002_board),
	.num_names = ARRAY_SIZE(serial2002_boards),
};
static int serial2002_di_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int serial2002_do_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int serial2002_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int serial2002_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
static int serial2002_ao_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
/* One decoded sample from the wire protocol (see serial_read):
 * a digital bit, a multi-byte channel value, or invalid on error. */
struct serial_data {
	enum { is_invalid, is_digital, is_channel } kind;
	int index;		/* channel number, 0..31 */
	unsigned long value;
};
/*
 * Forward an ioctl to the tty driver's unlocked_ioctl handler;
 * -ENOSYS when the driver does not provide one.
 */
static long tty_ioctl(struct file *f, unsigned op, unsigned long param)
{
	return f->f_op->unlocked_ioctl ?
		f->f_op->unlocked_ioctl(f, op, param) : -ENOSYS;
}
/*
 * Write `count` bytes from a kernel buffer to the tty.
 * Temporarily raises the address limit with set_fs(KERNEL_DS) so the
 * file's write() accepts a kernel-space pointer; returns the result
 * of write().
 */
static int tty_write(struct file *f, unsigned char *buf, int count)
{
	int result;
	mm_segment_t oldfs;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	f->f_pos = 0;
	result = f->f_op->write(f, buf, count, &f->f_pos);
	set_fs(oldfs);
	return result;
}
#if 0
/*
* On 2.6.26.3 this occaisonally gave me page faults, worked around by
* settings.c_cc[VMIN] = 0; settings.c_cc[VTIME] = 0
*/
static int tty_available(struct file *f)
{
long result = 0;
mm_segment_t oldfs;
oldfs = get_fs();
set_fs(KERNEL_DS);
tty_ioctl(f, FIONREAD, (unsigned long)&result);
set_fs(oldfs);
return result;
}
#endif
/*
 * Read one byte from the tty, waiting at most `timeout` microseconds
 * (elapsed time below is computed in microseconds).  Returns the byte
 * value, or -1 on timeout or error.  Uses poll() when the driver
 * supports it; otherwise falls back to a busy-wait loop.
 */
static int tty_read(struct file *f, int timeout)
{
	int result;
	result = -1;
	if (!IS_ERR(f)) {
		mm_segment_t oldfs;
		oldfs = get_fs();
		set_fs(KERNEL_DS);
		if (f->f_op->poll) {
			struct poll_wqueues table;
			struct timeval start, now;
			do_gettimeofday(&start);
			poll_initwait(&table);
			while (1) {
				long elapsed;
				int mask;
				mask = f->f_op->poll(f, &table.pt);
				if (mask & (POLLRDNORM | POLLRDBAND | POLLIN |
					    POLLHUP | POLLERR)) {
					break;
				}
				do_gettimeofday(&now);
				elapsed =
				    (1000000 * (now.tv_sec - start.tv_sec) +
				     now.tv_usec - start.tv_usec);
				if (elapsed > timeout) {
					break;
				}
				set_current_state(TASK_INTERRUPTIBLE);
				/* NOTE(review): (us * HZ) / 10000 yields 100x
				 * the remaining time in jiffies; poll wakeups
				 * normally end the wait earlier — confirm */
				schedule_timeout(((timeout -
						   elapsed) * HZ) / 10000);
			}
			poll_freewait(&table);
			{
				unsigned char ch;
				f->f_pos = 0;
				if (f->f_op->read(f, &ch, 1, &f->f_pos) == 1) {
					result = ch;
				}
			}
		} else {
			/* Device does not support poll, busy wait */
			/* NOTE(review): each retry takes ~100 us (udelay)
			 * but retries is compared against timeout in us, so
			 * the effective timeout is ~100x longer — confirm */
			int retries = 0;
			while (1) {
				unsigned char ch;
				retries++;
				if (retries >= timeout) {
					break;
				}
				f->f_pos = 0;
				if (f->f_op->read(f, &ch, 1, &f->f_pos) == 1) {
					result = ch;
					break;
				}
				udelay(100);
			}
		}
		set_fs(oldfs);
	}
	return result;
}
static void tty_setspeed(struct file *f, int speed)
{
mm_segment_t oldfs;
oldfs = get_fs();
set_fs(KERNEL_DS);
{
/* Set speed */
struct termios settings;
tty_ioctl(f, TCGETS, (unsigned long)&settings);
/* printk("Speed: %d\n", settings.c_cflag & (CBAUD | CBAUDEX)); */
settings.c_iflag = 0;
settings.c_oflag = 0;
settings.c_lflag = 0;
settings.c_cflag = CLOCAL | CS8 | CREAD;
settings.c_cc[VMIN] = 0;
settings.c_cc[VTIME] = 0;
switch (speed) {
case 2400:{
settings.c_cflag |= B2400;
}
break;
case 4800:{
settings.c_cflag |= B4800;
}
break;
case 9600:{
settings.c_cflag |= B9600;
}
break;
case 19200:{
settings.c_cflag |= B19200;
}
break;
case 38400:{
settings.c_cflag |= B38400;
}
break;
case 57600:{
settings.c_cflag |= B57600;
}
break;
case 115200:{
settings.c_cflag |= B115200;
}
break;
default:{
settings.c_cflag |= B9600;
}
break;
}
tty_ioctl(f, TCSETS, (unsigned long)&settings);
/* printk("Speed: %d\n", settings.c_cflag & (CBAUD | CBAUDEX)); */
}
{
/* Set low latency */
struct serial_struct settings;
tty_ioctl(f, TIOCGSERIAL, (unsigned long)&settings);
settings.flags |= ASYNC_LOW_LATENCY;
tty_ioctl(f, TIOCSSERIAL, (unsigned long)&settings);
}
set_fs(oldfs);
}
/* Request a read of digital input <channel> (command byte 0x40|chan) */
static void poll_digital(struct file *f, int channel)
{
	char cmd = 0x40 | (channel & 0x1f);

	tty_write(f, &cmd, 1);
}
/* Request a read of analog/encoder channel <channel> (byte 0x60|chan) */
static void poll_channel(struct file *f, int channel)
{
	char cmd = 0x60 | (channel & 0x1f);

	tty_write(f, &cmd, 1);
}
/*
 * Read and decode one sample from the device.
 *
 * Wire format: bytes with bit 7 set each carry 7 value bits (most
 * significant first); the final byte (bit 7 clear) carries 2 more
 * value bits in bits 6:5 and the 5-bit channel index in bits 4:0.
 * A lone final byte encodes a digital sample whose value (0 or 1)
 * comes from bits 6:5.  Returns kind == is_invalid on read error
 * or timeout.
 */
static struct serial_data serial_read(struct file *f, int timeout)
{
	struct serial_data result;
	int length;
	result.kind = is_invalid;
	result.index = 0;
	result.value = 0;
	length = 0;
	while (1) {
		int data = tty_read(f, timeout);
		length++;
		if (data < 0) {
			printk("serial2002 error\n");
			break;
		} else if (data & 0x80) {
			/* continuation byte: accumulate 7 more value bits */
			result.value = (result.value << 7) | (data & 0x7f);
		} else {
			if (length == 1) {
				/* single byte: digital sample, bits 6:5 */
				switch ((data >> 5) & 0x03) {
				case 0:{
						result.value = 0;
						result.kind = is_digital;
					}
					break;
				case 1:{
						result.value = 1;
						result.kind = is_digital;
					}
					break;
				}
			} else {
				/* final byte of a channel sample: 2 LSBs */
				result.value =
				    (result.value << 2) | ((data & 0x60) >> 5);
				result.kind = is_channel;
			}
			result.index = data & 0x1f;
			break;
		}
	}
	return result;
}
/*
 * Encode and send one sample (inverse of serial_read's format).
 * Digital samples are one byte: bit 5 = value, bits 4:0 = index.
 * Channel samples are sent as up to five 7-bit payload bytes with
 * bit 7 set (most significant first, leading zero groups omitted),
 * then a final byte with the value's 2 LSBs in bits 6:5 and the
 * 5-bit index in bits 4:0.
 */
static void serial_write(struct file *f, struct serial_data data)
{
	if (data.kind == is_digital) {
		unsigned char ch =
		    ((data.value << 5) & 0x20) | (data.index & 0x1f);
		tty_write(f, &ch, 1);
	} else {
		unsigned char ch[6];
		int i = 0;
		if (data.value >= (1L << 30)) {
			ch[i] = 0x80 | ((data.value >> 30) & 0x03);
			i++;
		}
		if (data.value >= (1L << 23)) {
			ch[i] = 0x80 | ((data.value >> 23) & 0x7f);
			i++;
		}
		if (data.value >= (1L << 16)) {
			ch[i] = 0x80 | ((data.value >> 16) & 0x7f);
			i++;
		}
		if (data.value >= (1L << 9)) {
			ch[i] = 0x80 | ((data.value >> 9) & 0x7f);
			i++;
		}
		ch[i] = 0x80 | ((data.value >> 2) & 0x7f);
		i++;
		ch[i] = ((data.value << 5) & 0x60) | (data.index & 0x1f);
		i++;
		tty_write(f, ch, i);
	}
}
/*
 * Open the configured tty and read the device's self-description.
 *
 * The device reports its configuration on channel 31: each record
 * encodes a channel number (bits 4:0), a kind (bits 7:5 — 1 digital
 * in, 2 digital out, 3 analog in, 4 analog out, 5 encoder), and a
 * command (bits 9:8 — 0 resolution in bits, 1 range minimum,
 * 2 range maximum, with min/max scaled by a unit field).  The
 * collected records are then used to populate the five comedi
 * subdevices (channel counts, maxdata and range tables).
 *
 * Returns 0 on success or a negative errno; on failure the tty is
 * closed again and devpriv->tty reset to NULL.
 *
 * Fixes vs. previous version: use snprintf ("/dev/ttyS%d" plus a full
 * int can need 21 bytes, overflowing the old char[20] with sprintf),
 * and pass NULL rather than 0 to filp_close.
 */
static int serial_2002_open(struct comedi_device *dev)
{
	int result;
	char port[24];

	snprintf(port, sizeof(port), "/dev/ttyS%d", devpriv->port);
	devpriv->tty = filp_open(port, O_RDWR, 0);
	if (IS_ERR(devpriv->tty)) {
		result = (int)PTR_ERR(devpriv->tty);
		printk("serial_2002: file open error = %d\n", result);
	} else {
		struct config_t {
			short int kind;
			short int bits;
			int min;
			int max;
		};
		struct config_t *dig_in_config;
		struct config_t *dig_out_config;
		struct config_t *chan_in_config;
		struct config_t *chan_out_config;
		int i;

		result = 0;
		dig_in_config = kcalloc(32, sizeof(struct config_t),
					GFP_KERNEL);
		dig_out_config = kcalloc(32, sizeof(struct config_t),
					 GFP_KERNEL);
		chan_in_config = kcalloc(32, sizeof(struct config_t),
					 GFP_KERNEL);
		chan_out_config = kcalloc(32, sizeof(struct config_t),
					  GFP_KERNEL);
		if (!dig_in_config || !dig_out_config
		    || !chan_in_config || !chan_out_config) {
			result = -ENOMEM;
			goto err_alloc_configs;
		}
		tty_setspeed(devpriv->tty, devpriv->speed);
		poll_channel(devpriv->tty, 31);	/* Start reading configuration */
		while (1) {
			struct serial_data data;

			data = serial_read(devpriv->tty, 1000);
			/* configuration stream ends at the first record that
			 * is not a channel-31 sample with a nonzero kind */
			if (data.kind != is_channel || data.index != 31 ||
			    !(data.value & 0xe0))
				break;
			{
				int command, channel, kind;
				struct config_t *cur_config = NULL;

				channel = data.value & 0x1f;
				kind = (data.value >> 5) & 0x7;
				command = (data.value >> 8) & 0x3;
				switch (kind) {
				case 1:
					cur_config = dig_in_config;
					break;
				case 2:
					cur_config = dig_out_config;
					break;
				case 3:
					cur_config = chan_in_config;
					break;
				case 4:
					cur_config = chan_out_config;
					break;
				case 5:
					/* encoders share the analog-in table */
					cur_config = chan_in_config;
					break;
				}
				if (cur_config) {
					cur_config[channel].kind = kind;
					switch (command) {
					case 0:
						cur_config[channel].bits =
						    (data.value >> 10) & 0x3f;
						break;
					case 1: {
						int unit, sign, min;

						unit = (data.value >> 10) & 0x7;
						sign = (data.value >> 13) & 0x1;
						min = (data.value >> 14) &
						      0xfffff;
						/* unit selects the scale:
						 * 0 = V (*1e6), 1 = mV (*1e3),
						 * 2 = uV (*1) */
						switch (unit) {
						case 0:
							min = min * 1000000;
							break;
						case 1:
							min = min * 1000;
							break;
						case 2:
							min = min * 1;
							break;
						}
						if (sign)
							min = -min;
						cur_config[channel].min = min;
						break;
					}
					case 2: {
						int unit, sign, max;

						unit = (data.value >> 10) & 0x7;
						sign = (data.value >> 13) & 0x1;
						max = (data.value >> 14) &
						      0xfffff;
						switch (unit) {
						case 0:
							max = max * 1000000;
							break;
						case 1:
							max = max * 1000;
							break;
						case 2:
							max = max * 1;
							break;
						}
						if (sign)
							max = -max;
						cur_config[channel].max = max;
						break;
					}
					}
				}
			}
		}
		for (i = 0; i <= 4; i++) {
			/* Fill in subdev data */
			struct config_t *c;
			unsigned char *mapping = NULL;
			struct serial2002_range_table_t *range = NULL;
			int kind = 0;

			switch (i) {
			case 0:
				c = dig_in_config;
				mapping = devpriv->digital_in_mapping;
				kind = 1;
				break;
			case 1:
				c = dig_out_config;
				mapping = devpriv->digital_out_mapping;
				kind = 2;
				break;
			case 2:
				c = chan_in_config;
				mapping = devpriv->analog_in_mapping;
				range = devpriv->in_range;
				kind = 3;
				break;
			case 3:
				c = chan_out_config;
				mapping = devpriv->analog_out_mapping;
				range = devpriv->out_range;
				kind = 4;
				break;
			case 4:
				c = chan_in_config;
				mapping = devpriv->encoder_in_mapping;
				range = devpriv->in_range;
				kind = 5;
				break;
			default:
				c = NULL;
				break;
			}
			if (c) {
				struct comedi_subdevice *s;
				const struct comedi_lrange **range_table_list =
				    NULL;
				unsigned int *maxdata_list;
				int j, chan;

				/* count device channels of this kind */
				for (chan = 0, j = 0; j < 32; j++) {
					if (c[j].kind == kind)
						chan++;
				}
				s = &dev->subdevices[i];
				s->n_chan = chan;
				s->maxdata = 0;
				kfree(s->maxdata_list);
				s->maxdata_list = maxdata_list =
				    kmalloc(sizeof(unsigned int) * s->n_chan,
					    GFP_KERNEL);
				if (!s->maxdata_list)
					break;	/* error handled below */
				kfree(s->range_table_list);
				s->range_table = NULL;
				s->range_table_list = NULL;
				if (range) {
					s->range_table_list = range_table_list =
					    kmalloc(sizeof
						    (struct
						     serial2002_range_table_t) *
						    s->n_chan, GFP_KERNEL);
					if (!s->range_table_list)
						break;	/* err handled below */
				}
				for (chan = 0, j = 0; j < 32; j++) {
					if (c[j].kind != kind)
						continue;
					if (mapping)
						mapping[chan] = j;
					if (range) {
						range[j].length = 1;
						range[j].range.min = c[j].min;
						range[j].range.max = c[j].max;
						range_table_list[chan] =
						    (const struct comedi_lrange
						     *)&range[j];
					}
					maxdata_list[chan] =
					    ((long long)1 << c[j].bits) - 1;
					chan++;
				}
			}
		}
		if (i <= 4) {
			/* Failed to allocate maxdata_list or range_table_list
			 * for a subdevice that needed it. */
			result = -ENOMEM;
			for (i = 0; i <= 4; i++) {
				struct comedi_subdevice *s;

				s = &dev->subdevices[i];
				kfree(s->maxdata_list);
				s->maxdata_list = NULL;
				kfree(s->range_table_list);
				s->range_table_list = NULL;
			}
		}
err_alloc_configs:
		kfree(dig_in_config);
		kfree(dig_out_config);
		kfree(chan_in_config);
		kfree(chan_out_config);
		if (result) {
			if (devpriv->tty) {
				filp_close(devpriv->tty, NULL);
				devpriv->tty = NULL;
			}
		}
	}
	return result;
}
static void serial_2002_close(struct comedi_device *dev)
{
if (!IS_ERR(devpriv->tty) && (devpriv->tty != 0)) {
filp_close(devpriv->tty, 0);
}
}
static int serial2002_di_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n;
int chan;
chan = devpriv->digital_in_mapping[CR_CHAN(insn->chanspec)];
for (n = 0; n < insn->n; n++) {
struct serial_data read;
poll_digital(devpriv->tty, chan);
while (1) {
read = serial_read(devpriv->tty, 1000);
if (read.kind != is_digital || read.index == chan) {
break;
}
}
data[n] = read.value;
}
return n;
}
static int serial2002_do_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n;
int chan;
chan = devpriv->digital_out_mapping[CR_CHAN(insn->chanspec)];
for (n = 0; n < insn->n; n++) {
struct serial_data write;
write.kind = is_digital;
write.index = chan;
write.value = data[n];
serial_write(devpriv->tty, write);
}
return n;
}
static int serial2002_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n;
int chan;
chan = devpriv->analog_in_mapping[CR_CHAN(insn->chanspec)];
for (n = 0; n < insn->n; n++) {
struct serial_data read;
poll_channel(devpriv->tty, chan);
while (1) {
read = serial_read(devpriv->tty, 1000);
if (read.kind != is_channel || read.index == chan) {
break;
}
}
data[n] = read.value;
}
return n;
}
static int serial2002_ao_winsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n;
int chan;
chan = devpriv->analog_out_mapping[CR_CHAN(insn->chanspec)];
for (n = 0; n < insn->n; n++) {
struct serial_data write;
write.kind = is_channel;
write.index = chan;
write.value = data[n];
serial_write(devpriv->tty, write);
devpriv->ao_readback[chan] = data[n];
}
return n;
}
/* comedi insn_read handler for the analog output subdevice: returns the
 * last value written to the channel from the readback cache. */
static int serial2002_ao_rinsn(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int n;

	for (n = 0; n < insn->n; n++)
		data[n] = devpriv->ao_readback[chan];
	return n;
}
static int serial2002_ei_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int n;
int chan;
chan = devpriv->encoder_in_mapping[CR_CHAN(insn->chanspec)];
for (n = 0; n < insn->n; n++) {
struct serial_data read;
poll_channel(devpriv->tty, chan);
while (1) {
read = serial_read(devpriv->tty, 1000);
if (read.kind != is_channel || read.index == chan) {
break;
}
}
data[n] = read.value;
}
return n;
}
static int serial2002_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
printk("comedi%d: serial2002: ", dev->minor);
dev->board_name = thisboard->name;
if (alloc_private(dev, sizeof(struct serial2002_private)) < 0) {
return -ENOMEM;
}
dev->open = serial_2002_open;
dev->close = serial_2002_close;
devpriv->port = it->options[0];
devpriv->speed = it->options[1];
printk("/dev/ttyS%d @ %d\n", devpriv->port, devpriv->speed);
if (alloc_subdevices(dev, 5) < 0)
return -ENOMEM;
/* digital input subdevice */
s = dev->subdevices + 0;
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_read = &serial2002_di_rinsn;
/* digital output subdevice */
s = dev->subdevices + 1;
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITEABLE;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_write = &serial2002_do_winsn;
/* analog input subdevice */
s = dev->subdevices + 2;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = 0;
s->insn_read = &serial2002_ai_rinsn;
/* analog output subdevice */
s = dev->subdevices + 3;
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITEABLE;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = 0;
s->insn_write = &serial2002_ao_winsn;
s->insn_read = &serial2002_ao_rinsn;
/* encoder input subdevice */
s = dev->subdevices + 4;
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
s->n_chan = 0;
s->maxdata = 1;
s->range_table = 0;
s->insn_read = &serial2002_ei_rinsn;
return 1;
}
/* comedi detach entry point: release the per-subdevice tables that were
 * kmalloc'ed when the remote device's layout was probed. */
static int serial2002_detach(struct comedi_device *dev)
{
	int i;

	printk("comedi%d: serial2002: remove\n", dev->minor);
	for (i = 0; i < 5; i++) {
		struct comedi_subdevice *s = &dev->subdevices[i];

		kfree(s->maxdata_list);
		kfree(s->range_table_list);
	}
	return 0;
}
/* Register the comedi driver on module load. */
static int __init driver_serial2002_init_module(void)
{
	return comedi_driver_register(&driver_serial2002);
}

/* Unregister the comedi driver on module unload. */
static void __exit driver_serial2002_cleanup_module(void)
{
	comedi_driver_unregister(&driver_serial2002);
}

module_init(driver_serial2002_init_module);
module_exit(driver_serial2002_cleanup_module);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TeamExodus/kernel_moto_shamu | drivers/gpu/drm/r128/r128_irq.c | 2531 | 3508 | /* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Eric Anholt <anholt@FreeBSD.org>
*/
#include <drm/drmP.h>
#include <drm/r128_drm.h>
#include "r128_drv.h"
/* DRM get_vblank_counter hook: only CRTC 0 is supported; any other crtc
 * reports a counter of 0. */
u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
{
	const drm_r128_private_t *dev_priv = dev->dev_private;

	if (crtc)
		return 0;

	return atomic_read(&dev_priv->vbl_received);
}
/* Top-half interrupt handler: the only source we handle is the CRTC
 * vblank interrupt; everything else is reported as not-ours. */
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
	int status = R128_READ(R128_GEN_INT_STATUS);

	if (!(status & R128_CRTC_VBLANK_INT))
		return IRQ_NONE;

	/* Ack the interrupt, bump the counter, notify the DRM core. */
	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
	atomic_inc(&dev_priv->vbl_received);
	drm_handle_vblank(dev, 0);
	return IRQ_HANDLED;
}
/* DRM enable_vblank hook: unmask the CRTC vblank interrupt. Only CRTC 0
 * exists on this hardware; anything else is rejected with -EINVAL. */
int r128_enable_vblank(struct drm_device *dev, int crtc)
{
	drm_r128_private_t *dev_priv = dev->dev_private;

	if (crtc) {
		DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
		return -EINVAL;
	}

	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
	return 0;
}
/* DRM disable_vblank hook. Intentionally does NOT mask the interrupt
 * (see FIXME below); it only validates the crtc argument. */
void r128_disable_vblank(struct drm_device *dev, int crtc)
{
	if (crtc != 0)
		DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);

	/*
	 * FIXME: implement proper interrupt disable by using the vblank
	 * counter register (if available)
	 *
	 * R128_WRITE(R128_GEN_INT_CNTL,
	 *	      R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
	 */
}
void r128_driver_irq_preinstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
/* Disable *all* interrupts */
R128_WRITE(R128_GEN_INT_CNTL, 0);
/* Clear vblank bit if it's already high */
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
/* Nothing to enable at install time; the vblank interrupt is unmasked on
 * demand by r128_enable_vblank(). */
int r128_driver_irq_postinstall(struct drm_device *dev)
{
	return 0;
}
void r128_driver_irq_uninstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable *all* interrupts */
R128_WRITE(R128_GEN_INT_CNTL, 0);
}
| gpl-2.0 |
sub77-bkp/android_kernel_samsung_golden | drivers/media/dvb/dvb-usb/dibusb-common.c | 2787 | 12127 | /* Common methods for dibusb-based-receivers.
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation, version 2.
*
* see Documentation/dvb/README.dvb-usb for more information
*/
#include <linux/slab.h>

#include "dibusb.h"
/* Module-wide debug level; bit 0x01 enables informational messages via
 * the deb_info() macro below. Writable at runtime through sysfs (0644). */
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS);
MODULE_LICENSE("GPL");
#define deb_info(args...) dprintk(debug,0x01,args)
/* common stuff used by the different dibusb modules */
/* Toggle the demodulator FIFO via the board-specific fifo_ctrl hook, if
 * the adapter has state and a hook installed; otherwise a no-op. */
int dibusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	struct dibusb_state *st = adap->priv;

	if (st == NULL || st->ops.fifo_ctrl == NULL)
		return 0;

	if (st->ops.fifo_ctrl(adap->fe, onoff)) {
		err("error while controlling the fifo of the demod.");
		return -ENODEV;
	}
	return 0;
}
EXPORT_SYMBOL(dibusb_streaming_ctrl);
/* Program one hardware PID filter slot through the board-specific
 * pid_ctrl hook; silently succeeds when no hook is installed. */
int dibusb_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
	struct dibusb_state *st = adap->priv;

	if (st != NULL && st->ops.pid_ctrl != NULL)
		st->ops.pid_ctrl(adap->fe, index, pid, onoff);

	return 0;
}
EXPORT_SYMBOL(dibusb_pid_filter);
/* Enable/disable PID parsing in the demod via the pid_parse hook; a hook
 * failure is logged but deliberately not propagated to the caller. */
int dibusb_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	struct dibusb_state *st = adap->priv;

	if (st != NULL && st->ops.pid_parse != NULL) {
		if (st->ops.pid_parse(adap->fe, onoff) < 0)
			err("could not handle pid_parser");
	}
	return 0;
}
EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
/* Send the power-mode ioctl (wakeup or sleep) to the dongle, then give
 * the firmware a short grace period before any further commands. */
int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
	int ret;
	u8 b[3] = {
		DIBUSB_REQ_SET_IOCTL,
		DIBUSB_IOCTL_CMD_POWER_MODE,
		onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP,
	};

	ret = dvb_usb_generic_write(d, b, 3);
	msleep(10);
	return ret;
}
EXPORT_SYMBOL(dibusb_power_ctrl);
/* Streaming control for 2.0-generation dongles: first toggle the demod
 * FIFO, then (when enabling) select the streaming mode, and finally send
 * the enable/disable-stream ioctl. */
int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	u8 b[3] = { 0 };
	int ret = dibusb_streaming_ctrl(adap, onoff);

	if (ret < 0)
		return ret;

	if (onoff) {
		b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
		b[1] = 0x00;
		ret = dvb_usb_generic_write(adap->dev, b, 2);
		if (ret < 0)
			return ret;
	}

	b[0] = DIBUSB_REQ_SET_IOCTL;
	b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
	return dvb_usb_generic_write(adap->dev, b, 3);
}
EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
/* Power control for 2.0-generation dongles: only wakeup is sent; power
 * down is intentionally a no-op on these devices. */
int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
{
	u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE,
		    DIBUSB_IOCTL_POWER_WAKEUP };

	if (!onoff)
		return 0;

	return dvb_usb_generic_write(d, b, 3);
}
EXPORT_SYMBOL(dibusb2_0_power_ctrl);
/* Build and submit one dibusb I2C request over USB.
 *
 * Wire format: lead(1) devaddr,direction(1) addr(2)... actually:
 * request(1) addr|dir(1) payload(wlen) [rlen(2) when reading].
 *
 * Fix: the original used a variable-length array on the kernel stack
 * (u8 sndbuf[wlen+4]) and handed it to the USB core. USB transfer
 * buffers must be DMA-able heap memory, and an attacker- or
 * caller-controlled wlen could also overflow the stack, so the buffer
 * is now kmalloc'ed and freed after the transfer. On allocation
 * failure -ENOMEM is returned.
 */
static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
			  u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	u8 *sndbuf;
	int ret;
	/* write only ? */
	int wo = (rbuf == NULL || rlen == 0),
	    len = 2 + wlen + (wo ? 0 : 2);

	sndbuf = kmalloc(wlen + 4, GFP_KERNEL);
	if (!sndbuf)
		return -ENOMEM;

	sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
	sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
	memcpy(&sndbuf[2], wbuf, wlen);

	if (!wo) {
		sndbuf[wlen + 2] = (rlen >> 8) & 0xff;
		sndbuf[wlen + 3] = rlen & 0xff;
	}

	ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0);
	kfree(sndbuf);
	return ret;
}
/*
* I2C master xfer function
*/
/*
 * i2c_algorithm master_xfer hook: translates an i2c_msg sequence into
 * dibusb USB requests, holding the device's i2c mutex for the whole
 * sequence. Returns the number of messages processed (which may be less
 * than num if a transfer fails), or -EAGAIN if the lock was interrupted.
 */
static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	int i;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	for (i = 0; i < num; i++) {
		/* write/read request: a write immediately followed by a read
		 * is collapsed into a single combined USB transaction */
		if (i+1 < num && (msg[i].flags & I2C_M_RD) == 0
					  && (msg[i+1].flags & I2C_M_RD)) {
			if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,
						msg[i+1].buf,msg[i+1].len) < 0)
				break;
			i++;
		} else if ((msg[i].flags & I2C_M_RD) == 0) {
			/* plain write */
			if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0)
				break;
		} else if (msg[i].addr != 0x50) {
			/* 0x50 is the address of the eeprom - we need to protect it
			 * from dibusb's bad i2c implementation: reads without
			 * writing the offset before are forbidden */
			if (dibusb_i2c_msg(d, msg[i].addr, NULL, 0, msg[i].buf, msg[i].len) < 0)
				break;
		}
		/* note: a standalone read addressed to 0x50 matches no branch
		 * above and is silently skipped, per the comment there */
	}

	mutex_unlock(&d->i2c_mutex);
	return i;
}
/* i2c_algorithm functionality hook: plain I2C transfers only, no SMBus
 * emulation is offered by this bridge. */
static u32 dibusb_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}
/* I2C algorithm exported to the dibusb-a/b/mb/mc front-end modules. */
struct i2c_algorithm dibusb_i2c_algo = {
	.master_xfer   = dibusb_i2c_xfer,
	.functionality = dibusb_i2c_func,
};
EXPORT_SYMBOL(dibusb_i2c_algo);
/* Read one byte from the board EEPROM (I2C address 0x50) at the given
 * offset, using the mandatory write-offset-then-read sequence. */
int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
{
	u8 offset_buf[1] = { offs };

	return dibusb_i2c_msg(d, 0x50, offset_buf, 1, val, 1);
}
EXPORT_SYMBOL(dibusb_read_eeprom_byte);
/* 3000MC/P stuff */
// Config Adjacent channels Perf -cal22
/* AGC calibration for DiB3000P boards with an MT2060 tuner. These are
 * board tuning constants consumed by the dib3000mc demod driver; their
 * register-level meaning is defined there — treat as opaque data here. */
static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
	.band_caps = BAND_VHF | BAND_UHF,
	.setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
	.agc1_max = 48497,
	.agc1_min = 23593,
	.agc2_max = 46531,
	.agc2_min = 24904,
	.agc1_pt1 = 0x65,
	.agc1_pt2 = 0x69,
	.agc1_slope1 = 0x51,
	.agc1_slope2 = 0x27,
	.agc2_pt1 = 0,
	.agc2_pt2 = 0x33,
	.agc2_slope1 = 0x35,
	.agc2_slope2 = 0x37,
};
/* DiB3000P demod configuration applied when an MT2060 tuner is found
 * (see dibusb_dib3000mc_tuner_attach). First member (positional) is the
 * AGC table above. */
static struct dib3000mc_config stk3000p_dib3000p_config = {
	&dib3000p_mt2060_agc_config,
	.max_time = 0x196,
	.ln_adc_level = 0x1cc7,
	.output_mpeg2_in_188_bytes = 1,
	.agc_command1 = 1,
	.agc_command2 = 1,
};
/* AGC calibration for DiB3000P boards with a Panasonic PLL tuner; same
 * field semantics as dib3000p_mt2060_agc_config (opaque tuning data). */
static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
	.band_caps = BAND_VHF | BAND_UHF,
	.setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
	.agc1_max = 56361,
	.agc1_min = 22282,
	.agc2_max = 47841,
	.agc2_min = 36045,
	.agc1_pt1 = 0x3b,
	.agc1_pt2 = 0x6b,
	.agc1_slope1 = 0x55,
	.agc1_slope2 = 0x1d,
	.agc2_pt1 = 0,
	.agc2_pt2 = 0x0a,
	.agc2_slope1 = 0x95,
	.agc2_slope2 = 0x1e,
};
#if defined(CONFIG_DVB_DIB3000MC) || \
(defined(CONFIG_DVB_DIB3000MC_MODULE) && defined(MODULE))
/* Default DiB3000P demod configuration used at frontend-attach time,
 * paired with the Panasonic AGC table; replaced by the stk3000p config
 * if an MT2060 tuner is later detected. */
static struct dib3000mc_config mod3000p_dib3000p_config = {
	&dib3000p_panasonic_agc_config,
	.max_time = 0x51,
	.ln_adc_level = 0x1cc7,
	.output_mpeg2_in_188_bytes = 1,
	.agc_command1 = 1,
	.agc_command2 = 1,
};
/* Attach a DiB3000MC/P demodulator, trying the "P" I2C address first and
 * falling back to the "MC" address. On success the PID-handling hooks
 * are installed into the adapter state (if any). */
int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
{
	/* Warm LiteOn sticks need a moment before the demod answers. */
	if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
	    adap->dev->udev->descriptor.idProduct == USB_PID_LITEON_DVB_T_WARM)
		msleep(1000);

	adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap,
			      DEFAULT_DIB3000P_I2C_ADDRESS,
			      &mod3000p_dib3000p_config);
	if (adap->fe == NULL)
		adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap,
				      DEFAULT_DIB3000MC_I2C_ADDRESS,
				      &mod3000p_dib3000p_config);
	if (adap->fe == NULL)
		return -ENODEV;

	if (adap->priv != NULL) {
		struct dibusb_state *st = adap->priv;

		st->ops.pid_parse = dib3000mc_pid_parse;
		st->ops.pid_ctrl = dib3000mc_pid_control;
	}
	return 0;
}
EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach);
/* MT2060 tuner configuration — 0x60 is presumably the tuner's I2C
 * address (the same address is used for the PLL fallback below);
 * confirm against struct mt2060_config's first member. */
static struct mt2060_config stk3000p_mt2060_config = {
	0x60
};
/* Attach the tuner behind a DiB3000MC/P: read a per-board IF1 correction
 * from the EEPROM, then try the MT2060; if it is absent, fall back to a
 * Panasonic ENV57H1XD5 PLL. When the MT2060 is found, the demod config
 * is switched to the MT2060-calibrated table. */
int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
{
	struct dibusb_state *st = adap->priv;
	u8 a,b;
	u16 if1 = 1220;	/* default first IF; adjusted by EEPROM data below */
	struct i2c_adapter *tun_i2c;

	// First IF calibration for Liteon Sticks
	if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
	    adap->dev->udev->descriptor.idProduct == USB_PID_LITEON_DVB_T_WARM) {
		/* byte 0x7E: sign (0x00 = add, 0x80 = subtract);
		 * byte 0x7F: magnitude of the correction */
		dibusb_read_eeprom_byte(adap->dev,0x7E,&a);
		dibusb_read_eeprom_byte(adap->dev,0x7F,&b);
		if (a == 0x00)
			if1 += b;
		else if (a == 0x80)
			if1 -= b;
		else
			warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b);
	} else if (adap->dev->udev->descriptor.idVendor == USB_VID_DIBCOM &&
		   adap->dev->udev->descriptor.idProduct == USB_PID_DIBCOM_MOD3001_WARM) {
		u8 desc;
		dibusb_read_eeprom_byte(adap->dev, 7, &desc);
		if (desc == 2) {
			/* scan down from the end of the EEPROM for the last
			 * non-blank byte, which encodes a signed correction */
			a = 127;
			do {
				dibusb_read_eeprom_byte(adap->dev, a, &desc);
				a--;
			} while (a > 7 && (desc == 0xff || desc == 0x00));
			if (desc & 0x80)
				if1 -= (0xff - desc);
			else
				if1 += desc;
		}
	}

	tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe, 1);
	if (dvb_attach(mt2060_attach, adap->fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
		/* not found - use panasonic pll parameters */
		if (dvb_attach(dvb_pll_attach, adap->fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
			return -ENOMEM;
	} else {
		st->mt2060_present = 1;
		/* set the correct parameters for the dib3000p */
		dib3000mc_set_config(adap->fe, &stk3000p_dib3000p_config);
	}
	return 0;
}
EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
#endif
/*
* common remote control stuff
*/
/* Scancode-to-keycode map shared by all dibusb-based remotes; covers four
 * physical remote layouts (see the section comments below). Entries are
 * { NEC scancode, input-layer keycode } pairs. */
struct rc_map_table rc_map_dibusb_table[] = {
	/* Key codes for the little Artec T1/Twinhan/HAMA/ remote. */
	{ 0x0016, KEY_POWER },
	{ 0x0010, KEY_MUTE },
	{ 0x0003, KEY_1 },
	{ 0x0001, KEY_2 },
	{ 0x0006, KEY_3 },
	{ 0x0009, KEY_4 },
	{ 0x001d, KEY_5 },
	{ 0x001f, KEY_6 },
	{ 0x000d, KEY_7 },
	{ 0x0019, KEY_8 },
	{ 0x001b, KEY_9 },
	{ 0x0015, KEY_0 },
	{ 0x0005, KEY_CHANNELUP },
	{ 0x0002, KEY_CHANNELDOWN },
	{ 0x001e, KEY_VOLUMEUP },
	{ 0x000a, KEY_VOLUMEDOWN },
	{ 0x0011, KEY_RECORD },
	{ 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */
	{ 0x0014, KEY_PLAY },
	{ 0x001a, KEY_STOP },
	{ 0x0040, KEY_REWIND },
	{ 0x0012, KEY_FASTFORWARD },
	{ 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */
	{ 0x004c, KEY_PAUSE },
	{ 0x004d, KEY_SCREEN }, /* Full screen mode. */
	{ 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */
	/* additional keys TwinHan VisionPlus, the Artec seemingly not have */
	{ 0x000c, KEY_CANCEL }, /* Cancel */
	{ 0x001c, KEY_EPG }, /* EPG */
	{ 0x0000, KEY_TAB }, /* Tab */
	{ 0x0048, KEY_INFO }, /* Preview */
	{ 0x0004, KEY_LIST }, /* RecordList */
	{ 0x000f, KEY_TEXT }, /* Teletext */
	/* Key codes for the KWorld/ADSTech/JetWay remote. */
	{ 0x8612, KEY_POWER },
	{ 0x860f, KEY_SELECT }, /* source */
	{ 0x860c, KEY_UNKNOWN }, /* scan */
	{ 0x860b, KEY_EPG },
	{ 0x8610, KEY_MUTE },
	{ 0x8601, KEY_1 },
	{ 0x8602, KEY_2 },
	{ 0x8603, KEY_3 },
	{ 0x8604, KEY_4 },
	{ 0x8605, KEY_5 },
	{ 0x8606, KEY_6 },
	{ 0x8607, KEY_7 },
	{ 0x8608, KEY_8 },
	{ 0x8609, KEY_9 },
	{ 0x860a, KEY_0 },
	{ 0x8618, KEY_ZOOM },
	{ 0x861c, KEY_UNKNOWN }, /* preview */
	{ 0x8613, KEY_UNKNOWN }, /* snap */
	{ 0x8600, KEY_UNDO },
	{ 0x861d, KEY_RECORD },
	{ 0x860d, KEY_STOP },
	{ 0x860e, KEY_PAUSE },
	{ 0x8616, KEY_PLAY },
	{ 0x8611, KEY_BACK },
	{ 0x8619, KEY_FORWARD },
	{ 0x8614, KEY_UNKNOWN }, /* pip */
	{ 0x8615, KEY_ESC },
	{ 0x861a, KEY_UP },
	{ 0x861e, KEY_DOWN },
	{ 0x861f, KEY_LEFT },
	{ 0x861b, KEY_RIGHT },
	/* Key codes for the DiBcom MOD3000 remote. */
	{ 0x8000, KEY_MUTE },
	{ 0x8001, KEY_TEXT },
	{ 0x8002, KEY_HOME },
	{ 0x8003, KEY_POWER },
	{ 0x8004, KEY_RED },
	{ 0x8005, KEY_GREEN },
	{ 0x8006, KEY_YELLOW },
	{ 0x8007, KEY_BLUE },
	{ 0x8008, KEY_DVD },
	{ 0x8009, KEY_AUDIO },
	{ 0x800a, KEY_IMAGES }, /* Pictures */
	{ 0x800b, KEY_VIDEO },
	{ 0x800c, KEY_BACK },
	{ 0x800d, KEY_UP },
	{ 0x800e, KEY_RADIO },
	{ 0x800f, KEY_EPG },
	{ 0x8010, KEY_LEFT },
	{ 0x8011, KEY_OK },
	{ 0x8012, KEY_RIGHT },
	{ 0x8013, KEY_UNKNOWN }, /* SAP */
	{ 0x8014, KEY_TV },
	{ 0x8015, KEY_DOWN },
	{ 0x8016, KEY_MENU }, /* DVD Menu */
	{ 0x8017, KEY_LAST },
	{ 0x8018, KEY_RECORD },
	{ 0x8019, KEY_STOP },
	{ 0x801a, KEY_PAUSE },
	{ 0x801b, KEY_PLAY },
	{ 0x801c, KEY_PREVIOUS },
	{ 0x801d, KEY_REWIND },
	{ 0x801e, KEY_FASTFORWARD },
	{ 0x801f, KEY_NEXT},
	{ 0x8040, KEY_1 },
	{ 0x8041, KEY_2 },
	{ 0x8042, KEY_3 },
	{ 0x8043, KEY_CHANNELUP },
	{ 0x8044, KEY_4 },
	{ 0x8045, KEY_5 },
	{ 0x8046, KEY_6 },
	{ 0x8047, KEY_CHANNELDOWN },
	{ 0x8048, KEY_7 },
	{ 0x8049, KEY_8 },
	{ 0x804a, KEY_9 },
	{ 0x804b, KEY_VOLUMEUP },
	{ 0x804c, KEY_CLEAR },
	{ 0x804d, KEY_0 },
	{ 0x804e, KEY_ENTER },
	{ 0x804f, KEY_VOLUMEDOWN },
};
EXPORT_SYMBOL(rc_map_dibusb_table);
/* Poll the dongle for a remote-control event and translate the 5-byte
 * NEC reply into an input event.
 *
 * Fix: the return value of dvb_usb_generic_rw() was ignored, so on a
 * failed USB transfer key[] was decoded while still holding
 * uninitialized stack data. Propagate the error instead. */
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
	u8 key[5], cmd = DIBUSB_REQ_POLL_REMOTE;
	int ret;

	ret = dvb_usb_generic_rw(d, &cmd, 1, key, 5, 0);
	if (ret < 0)
		return ret;

	dvb_usb_nec_rc_key_to_event(d, key, event, state);
	if (key[0] != 0)
		deb_info("key: %x %x %x %x %x\n",key[0],key[1],key[2],key[3],key[4]);
	return 0;
}
EXPORT_SYMBOL(dibusb_rc_query);
| gpl-2.0 |
burstlam/zte-blade-35 | net/decnet/dn_nsp_in.c | 3555 | 21604 | /*
* DECnet An implementation of the DECnet protocol suite for the LINUX
* operating system. DECnet is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* DECnet Network Services Protocol (Input)
*
* Author: Eduardo Marcelo Serrat <emserrat@geocities.com>
*
* Changes:
*
* Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from
* original dn_nsp.c.
* Steve Whitehouse: Updated to work with my new routing architecture.
* Steve Whitehouse: Add changes from Eduardo Serrat's patches.
* Steve Whitehouse: Put all ack handling code in a common routine.
* Steve Whitehouse: Put other common bits into dn_nsp_rx()
* Steve Whitehouse: More checks on skb->len to catch bogus packets
* Fixed various race conditions and possible nasties.
* Steve Whitehouse: Now handles returned conninit frames.
* David S. Miller: New socket locking
* Steve Whitehouse: Fixed lockup when socket filtering was enabled.
* Paul Koning: Fix to push CC sockets into RUN when acks are
* received.
* Steve Whitehouse:
* Patrick Caulfield: Checking conninits for correctness & sending of error
* responses.
* Steve Whitehouse: Added backlog congestion level return codes.
* Patrick Caulfield:
* Steve Whitehouse: Added flow control support (outbound)
* Steve Whitehouse: Prepare for nonlinear skbs
*/
/******************************************************************************
(c) 1995-1998 E.M. Serrat emserrat@geocities.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*******************************************************************************/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/termios.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_decnet.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
extern int decnet_log_martians;
/* Rate-limited log of a malformed ("martian") DECnet packet, gated on
 * the decnet_log_martians sysctl. */
static void dn_log_martian(struct sk_buff *skb, const char *msg)
{
	struct dn_skb_cb *cb;
	char *devname;

	if (!decnet_log_martians || !net_ratelimit())
		return;

	devname = skb->dev ? skb->dev->name : "???";
	cb = DN_SKB_CB(skb);
	printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
	       msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
	       le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port));
}
/*
* For this function we've flipped the cross-subchannel bit
* if the message is an otherdata or linkservice message. Thus
* we can use it to work out what to update.
*/
/* Apply one received ack word to the matching subchannel state.
 * Bits 12-13 of the (already cross-subchannel-flipped) word select:
 * 0 = data ACK, 1 = data NAK, 2 = otherdata ACK, 3 = otherdata NAK.
 * NAKs are ignored; ACKs purge the relevant transmit queue and, if that
 * freed space, wake the socket. */
static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
{
	struct dn_scp *scp = DN_SK(sk);
	unsigned short type = (ack >> 12) & 0x0003;
	int wakeup = 0;

	if (type == 0) {
		/* ACK - Data */
		if (dn_after(ack, scp->ackrcv_dat)) {
			scp->ackrcv_dat = ack & 0x0fff;
			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
							  &scp->data_xmit_queue,
							  ack);
		}
	} else if (type == 2) {
		/* ACK - OtherData */
		if (dn_after(ack, scp->ackrcv_oth)) {
			scp->ackrcv_oth = ack & 0x0fff;
			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
							  &scp->other_xmit_queue,
							  ack);
		}
	}
	/* types 1 and 3 are NAKs: nothing to update */

	if (wakeup && !sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
}
/*
* This function is a universal ack processor.
*/
/* Strip up to two optional ack words (one per subchannel) from the head
 * of the message. A word is an ack iff bit 15 is set; bit 14 marks it
 * as "other subchannel" info that dn_ack() should not see directly.
 * Returns the number of bytes consumed. */
static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
{
	__le16 *ptr = (__le16 *)skb->data;
	unsigned short ack;
	int len = 0;
	int i;

	for (i = 0; i < 2; i++) {
		if (skb->len < 2)
			break;
		ack = le16_to_cpu(*ptr);
		if (!(ack & 0x8000))
			break;
		skb_pull(skb, 2);
		ptr++;
		len += 2;
		if ((ack & 0x4000) == 0) {
			if (oth)
				ack ^= 0x2000;
			dn_ack(sk, skb, ack);
		}
	}

	return len;
}
/**
* dn_check_idf - Check an image data field format is correct.
* @pptr: Pointer to pointer to image data
* @len: Pointer to length of image data
* @max: The maximum allowed length of the data in the image data field
* @follow_on: Check that this many bytes exist beyond the end of the image data
*
* Returns: 0 if ok, -1 on error
*/
static inline int dn_check_idf(unsigned char **pptr, int *len, unsigned char max, unsigned char follow_on)
{
	unsigned char *field = *pptr;
	unsigned char flen = *field++;	/* leading length byte */

	(*len)--;	/* the length byte itself has now been consumed */

	/* Reject fields longer than allowed, or that would not leave
	 * follow_on bytes after the field's data. */
	if (flen > max || (flen + follow_on) > *len)
		return -1;

	*len -= flen;
	*pptr = field + flen;
	return 0;
}
/*
* Table of reason codes to pass back to node which sent us a badly
* formed message, plus text messages for the log. A zero entry in
* the reason field means "don't reply" otherwise a disc init is sent with
* the specified reason code.
*/
static struct {
	unsigned short reason;	/* disc-init reason code; 0 = don't reply */
	const char *text;	/* log message for dn_log_martian() */
} ci_err_table[] = {
	/* indexed by the running `err` counter in dn_find_listener() */
	{ 0, "CI: Truncated message" },
	{ NSP_REASON_ID, "CI: Destination username error" },
	{ NSP_REASON_ID, "CI: Destination username type" },
	{ NSP_REASON_US, "CI: Source username error" },
	{ 0, "CI: Truncated at menuver" },
	{ 0, "CI: Truncated before access or user data" },
	{ NSP_REASON_IO, "CI: Access data format error" },
	{ NSP_REASON_IO, "CI: User data format error" }
};
/*
* This function uses a slightly different lookup method
* to find its sockets, since it searches on object name/number
* rather than port numbers. Various tests are done to ensure that
* the incoming data is in the correct format before it is queued to
* a socket.
*/
/* Validate an incoming connect-init message and look up the listening
 * socket for its destination username. On any format error, logs the
 * martian, stores the ci_err_table reason in *reason and returns NULL.
 *
 * Fix: the original dereferenced msg->srcaddr etc. BEFORE calling
 * pskb_may_pull(), i.e. before verifying that the fixed header is
 * actually present in the linear skb data — an out-of-bounds read for
 * short packets. The check now comes first; and since pskb_may_pull()
 * may reallocate the skb head, the msg pointer is taken only after it
 * succeeds. */
static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct nsp_conn_init_msg *msg;
	struct sockaddr_dn dstaddr;
	struct sockaddr_dn srcaddr;
	unsigned char type = 0;
	int dstlen;
	int srclen;
	unsigned char *ptr;
	int len;
	int err = 0;	/* running index into ci_err_table */
	unsigned char menuver;

	memset(&dstaddr, 0, sizeof(struct sockaddr_dn));
	memset(&srcaddr, 0, sizeof(struct sockaddr_dn));

	/*
	 * 1. Decode & remove message header
	 */
	if (!pskb_may_pull(skb, sizeof(*msg)))
		goto err_out;

	msg = (struct nsp_conn_init_msg *)skb->data;

	cb->src_port = msg->srcaddr;
	cb->dst_port = msg->dstaddr;
	cb->services = msg->services;
	cb->info = msg->info;
	cb->segsize = le16_to_cpu(msg->segsize);

	skb_pull(skb, sizeof(*msg));

	len = skb->len;
	ptr = skb->data;

	/*
	 * 2. Check destination end username format
	 */
	dstlen = dn_username2sockaddr(ptr, len, &dstaddr, &type);
	err++;
	if (dstlen < 0)
		goto err_out;

	err++;
	if (type > 1)
		goto err_out;

	len -= dstlen;
	ptr += dstlen;

	/*
	 * 3. Check source end username format
	 */
	srclen = dn_username2sockaddr(ptr, len, &srcaddr, &type);
	err++;
	if (srclen < 0)
		goto err_out;

	len -= srclen;
	ptr += srclen;
	err++;
	if (len < 1)
		goto err_out;

	menuver = *ptr;
	ptr++;
	len--;

	/*
	 * 4. Check that optional data actually exists if menuver says it does
	 */
	err++;
	if ((menuver & (DN_MENUVER_ACC | DN_MENUVER_USR)) && (len < 1))
		goto err_out;

	/*
	 * 5. Check optional access data format
	 */
	err++;
	if (menuver & DN_MENUVER_ACC) {
		if (dn_check_idf(&ptr, &len, 39, 1))
			goto err_out;
		if (dn_check_idf(&ptr, &len, 39, 1))
			goto err_out;
		if (dn_check_idf(&ptr, &len, 39, (menuver & DN_MENUVER_USR) ? 1 : 0))
			goto err_out;
	}

	/*
	 * 6. Check optional user data format
	 */
	err++;
	if (menuver & DN_MENUVER_USR) {
		if (dn_check_idf(&ptr, &len, 16, 0))
			goto err_out;
	}

	/*
	 * 7. Look up socket based on destination end username
	 */
	return dn_sklist_find_listener(&dstaddr);
err_out:
	dn_log_martian(skb, ci_err_table[err].text);
	*reason = ci_err_table[err].reason;
	return NULL;
}
/* Queue an incoming connect-init on the listener's receive queue so
 * accept() can pick it up; dropped if the accept backlog is full. */
static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
{
	if (sk_acceptq_is_full(sk)) {
		kfree_skb(skb);
		return;
	}

	sk->sk_ack_backlog++;
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_state_change(sk);
}
/* Handle a connect-confirm: move a CI/CD socket into RUN, record the
 * peer's negotiated services/segment size, capture any connect option
 * data, and send the initial link-service message. */
static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_scp *scp = DN_SK(sk);
	unsigned char *ptr;

	/* need services(1) + info(1) + segsize(2) */
	if (skb->len < 4)
		goto out;

	ptr = skb->data;
	cb->services = *ptr++;
	cb->info = *ptr++;
	cb->segsize = le16_to_cpu(*(__le16 *)ptr);

	if ((scp->state == DN_CI) || (scp->state == DN_CD)) {
		scp->persist = 0;
		scp->addrrem = cb->src_port;
		sk->sk_state = TCP_ESTABLISHED;
		scp->state = DN_RUN;
		scp->services_rem = cb->services;
		scp->info_rem = cb->info;
		scp->segsize_rem = cb->segsize;

		/* no flow control negotiated: fall back to fixed window */
		if ((scp->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
			scp->max_window = decnet_no_fc_max_cwnd;

		if (skb->len > 0) {
			u16 dlen = *skb->data;
			/* NOTE(review): the 4 header bytes above were read
			 * without an skb_pull(), so skb->data still points at
			 * the services byte here, and "dlen <= skb->len"
			 * looks off-by-one for a copy starting at offset 1 —
			 * verify against the NSP connect-confirm layout. */
			if ((dlen <= 16) && (dlen <= skb->len)) {
				scp->conndata_in.opt_optl = cpu_to_le16(dlen);
				skb_copy_from_linear_data_offset(skb, 1,
					scp->conndata_in.opt_data, dlen);
			}
		}
		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
	}

out:
	kfree_skb(skb);
}
/* Handle a connect-ack: only meaningful while we are waiting in CI, in
 * which case the socket advances to CD (connect delivered). */
static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->state == DN_CI) {
		scp->state = DN_CD;
		scp->persist = 0;
	}

	kfree_skb(skb);
}
/* Handle a disconnect-init from the peer: record the reason and any
 * optional disconnect data, transition the socket state accordingly,
 * and answer with a disconnect-confirm when we know the remote port. */
static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned short reason;

	if (skb->len < 2)
		goto out;

	reason = le16_to_cpu(*(__le16 *)skb->data);
	skb_pull(skb, 2);

	scp->discdata_in.opt_status = cpu_to_le16(reason);
	scp->discdata_in.opt_optl = 0;
	memset(scp->discdata_in.opt_data, 0, 16);

	if (skb->len > 0) {
		/* optional data: one length byte followed by dlen bytes */
		u16 dlen = *skb->data;
		/* NOTE(review): "dlen <= skb->len" looks off-by-one — the
		 * copy reads dlen bytes starting at offset 1, which needs
		 * skb->len >= dlen + 1; confirm whether dlen == skb->len can
		 * read one byte past the message. */
		if ((dlen <= 16) && (dlen <= skb->len)) {
			scp->discdata_in.opt_optl = cpu_to_le16(dlen);
			skb_copy_from_linear_data_offset(skb, 1, scp->discdata_in.opt_data, dlen);
		}
	}

	scp->addrrem = cb->src_port;
	sk->sk_state = TCP_CLOSE;

	switch(scp->state) {
	case DN_CI:
	case DN_CD:
		/* peer refused our connect attempt */
		scp->state = DN_RJ;
		sk->sk_err = ECONNREFUSED;
		break;
	case DN_RUN:
		sk->sk_shutdown |= SHUTDOWN_MASK;
		scp->state = DN_DN;
		break;
	case DN_DI:
		/* both sides disconnecting simultaneously */
		scp->state = DN_DIC;
		break;
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		if (sk->sk_socket->state != SS_UNCONNECTED)
			sk->sk_socket->state = SS_DISCONNECTING;
		sk->sk_state_change(sk);
	}

	/*
	 * It appears that its possible for remote machines to send disc
	 * init messages with no port identifier if we are in the CI and
	 * possibly also the CD state. Obviously we shouldn't reply with
	 * a message if we don't know what the end point is.
	 */
	if (scp->addrrem) {
		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
	}
	scp->persist_fxn = dn_destroy_timer;
	scp->persist = dn_nsp_persist(sk);

out:
	kfree_skb(skb);
}
/*
* disc_conf messages are also called no_resources or no_link
* messages depending upon the "reason" field.
*/
/*
 * Process a Disconnect Confirm (reason NSP_REASON_DC), no-resources or
 * no-link message: close the socket and advance the state machine
 * according to the current state and the reason code.
 */
static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);
	unsigned short reason;

	/* Message body is exactly the 16bit reason code. */
	if (skb->len != 2)
		goto out;

	reason = le16_to_cpu(*(__le16 *)skb->data);

	sk->sk_state = TCP_CLOSE;

	switch(scp->state) {
	case DN_CI:
		/* Connect attempt rejected: no resources. */
		scp->state = DN_NR;
		break;
	case DN_DR:
		if (reason == NSP_REASON_DC)
			scp->state = DN_DRC;
		if (reason == NSP_REASON_NL)
			scp->state = DN_CN;
		break;
	case DN_DI:
		scp->state = DN_DIC;
		break;
	case DN_RUN:
		sk->sk_shutdown |= SHUTDOWN_MASK;
		/* fall through - a running connection also ends up closed */
	case DN_CC:
		scp->state = DN_CN;
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		if (sk->sk_socket->state != SS_UNCONNECTED)
			sk->sk_socket->state = SS_DISCONNECTING;
		sk->sk_state_change(sk);
	}

	/* Arm the persist timer to destroy the socket later. */
	scp->persist_fxn = dn_destroy_timer;
	scp->persist = dn_nsp_persist(sk);

out:
	kfree_skb(skb);
}
/*
 * Process a Link Service message: a flow control update from the peer,
 * either for the data channel (request count / stop / start) or for
 * the interrupt ("other data") channel. Always acknowledges on the
 * "other" sequence and consumes the skb.
 */
static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);
	unsigned short segnum;
	unsigned char lsflags;
	signed char fcval;
	int wake_up = 0;
	char *ptr = skb->data;
	unsigned char fctype = scp->services_rem & NSP_FC_MASK;

	/* Fixed layout: 16bit segnum, flags byte, signed fc value. */
	if (skb->len != 4)
		goto out;

	segnum = le16_to_cpu(*(__le16 *)ptr);
	ptr += 2;
	lsflags = *(unsigned char *)ptr++;
	fcval = *ptr;

	/*
	 * Here we ignore erronous packets which should really
	 * should cause a connection abort. It is not critical
	 * for now though.
	 */
	if (lsflags & 0xf8)
		goto out;

	/* Only act when this is the next expected "other" message. */
	if (seq_next(scp->numoth_rcv, segnum)) {
		seq_add(&scp->numoth_rcv, 1);
		switch(lsflags & 0x04) { /* FCVAL INT */
		case 0x00: /* Normal Request */
			switch(lsflags & 0x03) { /* FCVAL MOD */
			case 0x00: /* Request count */
				if (fcval < 0) {
					/* Credit reduction: only honoured in
					 * session-control message flow
					 * control mode, and only while it
					 * leaves some credit. */
					unsigned char p_fcval = -fcval;
					if ((scp->flowrem_dat > p_fcval) &&
					    (fctype == NSP_FC_SCMC)) {
						scp->flowrem_dat -= p_fcval;
					}
				} else if (fcval > 0) {
					scp->flowrem_dat += fcval;
					wake_up = 1;
				}
				break;
			case 0x01: /* Stop outgoing data */
				scp->flowrem_sw = DN_DONTSEND;
				break;
			case 0x02: /* Ok to start again */
				scp->flowrem_sw = DN_SEND;
				dn_nsp_output(sk);
				wake_up = 1;
			}
			break;
		case 0x04: /* Interrupt Request */
			if (fcval > 0) {
				scp->flowrem_oth += fcval;
				wake_up = 1;
			}
			break;
		}
		/* Credit became available: let blocked writers progress. */
		if (wake_up && !sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
	}

	dn_nsp_send_oth_ack(sk);

out:
	kfree_skb(skb);
}
/*
* Copy of sock_queue_rcv_skb (from sock.h) without
* bh_lock_sock() (its already held when this is called) which
* also allows data and other data to be queued to a socket.
*/
/*
 * Queue @skb on @queue for @sk, enforcing the receive buffer limit and
 * the socket filter, and notify the reader.
 *
 * Returns 0 on success, -ENOMEM when over sk_rcvbuf, or the error from
 * sk_filter(). On error the caller still owns the skb.
 *
 * NOTE(review): the @sig parameter is unused in this implementation.
 */
static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
{
	int err;
	int skb_len;

	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	/* Cache the length before queueing - presumably a reader could
	 * consume the skb as soon as it is on the queue. */
	skb_len = skb->len;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
/*
 * Process an Other Data segment: queue it on the out-of-band receive
 * queue when it is the next expected sequence number, and always send
 * an ack on the "other" channel.
 */
static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned short segnum;
	int accepted = 0;

	if (skb->len < 2)
		goto done;

	segnum = le16_to_cpu(*(__le16 *)skb->data);
	cb->segnum = segnum;
	skb_pull(skb, 2);

	/* In-sequence segment that the socket had room for? */
	if (seq_next(scp->numoth_rcv, segnum) &&
	    dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) {
		seq_add(&scp->numoth_rcv, 1);
		scp->other_report = 0;
		accepted = 1;
	}

	dn_nsp_send_oth_ack(sk);
done:
	if (!accepted)
		kfree_skb(skb);
}
/*
 * Process a Data segment: queue it on the socket receive queue when it
 * is the next expected sequence number, throttle the peer when the
 * socket becomes congested, and always send a data ack.
 */
static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
{
	int queued = 0;
	unsigned short segnum;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_scp *scp = DN_SK(sk);

	/* Need the 16bit segment number. */
	if (skb->len < 2)
		goto out;

	cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data);
	skb_pull(skb, 2);

	if (seq_next(scp->numdat_rcv, segnum)) {
		if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
			seq_add(&scp->numdat_rcv, 1);
			queued = 1;
		}

		/* Receive side is filling up: ask the peer to stop. */
		if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) {
			scp->flowloc_sw = DN_DONTSEND;
			dn_nsp_send_link(sk, DN_DONTSEND, 0);
		}
	}

	dn_nsp_send_data_ack(sk);
out:
	if (!queued)
		kfree_skb(skb);
}
/*
 * One of our own Connect Init messages bounced back to us
 * (returned-to-sender): the peer is unreachable, so a socket still in
 * the CI state moves to NO_COMMUNICATION and is closed.
 */
static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->state == DN_CI) {
		sk->sk_state = TCP_CLOSE;
		scp->state = DN_NC;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
	}

	kfree_skb(skb);
}
/*
 * No socket matched this packet. For connect init/confirm messages we
 * reply with a disconnect carrying @reason (unless the packet is a
 * returned one); everything else is dropped silently.
 *
 * Returns NET_RX_SUCCESS when a reply was generated, NET_RX_DROP
 * otherwise. The skb is always consumed.
 */
static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	int ret = NET_RX_DROP;

	/* Must not reply to returned packets */
	if (cb->rt_flags & DN_RT_F_RTS)
		goto out;

	if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) {
		switch(cb->nsp_flags & 0x70) {
		case 0x10:
		case 0x60: /* (Retransmitted) Connect Init */
			dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
			ret = NET_RX_SUCCESS;
			break;
		case 0x20: /* Connect Confirm */
			dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
			ret = NET_RX_SUCCESS;
			break;
		}
	}

out:
	kfree_skb(skb);
	return ret;
}
/*
 * Main NSP receive routine (runs after the LOCAL_IN netfilter hook).
 * Parses the message type and port addressing, locates the destination
 * socket and hands the skb on, or generates a "no socket" response.
 *
 * Returns a NET_RX_* verdict; the skb is always consumed on error.
 */
static int dn_nsp_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sock *sk = NULL;
	unsigned char *ptr = (unsigned char *)skb->data;
	unsigned short reason = NSP_REASON_NL;

	if (!pskb_may_pull(skb, 2))
		goto free_out;

	skb_reset_transport_header(skb);
	cb->nsp_flags = *ptr++;

	if (decnet_debug_level & 2)
		printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags);

	/* Extended/invalid flag bits: not a message type we handle. */
	if (cb->nsp_flags & 0x83)
		goto free_out;

	/*
	 * Filter out conninits and useless packet types
	 */
	if ((cb->nsp_flags & 0x0c) == 0x08) {
		switch(cb->nsp_flags & 0x70) {
		case 0x00: /* NOP */
		case 0x70: /* Reserved */
		case 0x50: /* Reserved, Phase II node init */
			goto free_out;
		case 0x10:
		case 0x60:
			/* Connect Init: addressed to a listener, not to
			 * an established port pair. */
			if (unlikely(cb->rt_flags & DN_RT_F_RTS))
				goto free_out;
			sk = dn_find_listener(skb, &reason);
			goto got_it;
		}
	}

	if (!pskb_may_pull(skb, 3))
		goto free_out;

	/*
	 * Grab the destination address.
	 */
	cb->dst_port = *(__le16 *)ptr;
	cb->src_port = 0;
	ptr += 2;

	/*
	 * If not a connack, grab the source address too.
	 */
	if (pskb_may_pull(skb, 5)) {
		cb->src_port = *(__le16 *)ptr;
		ptr += 2;
		skb_pull(skb, 5);
	}

	/*
	 * Returned packets...
	 * Swap src & dst and look up in the normal way.
	 */
	if (unlikely(cb->rt_flags & DN_RT_F_RTS)) {
		__le16 tmp = cb->dst_port;
		cb->dst_port = cb->src_port;
		cb->src_port = tmp;
		tmp = cb->dst;
		cb->dst = cb->src;
		cb->src = tmp;
	}

	/*
	 * Find the socket to which this skb is destined.
	 */
	sk = dn_find_by_skb(skb);
got_it:
	if (sk != NULL) {
		struct dn_scp *scp = DN_SK(sk);

		/* Reset backoff */
		scp->nsp_rxtshift = 0;

		/*
		 * We linearize everything except data segments here.
		 */
		if (cb->nsp_flags & ~0x60) {
			if (unlikely(skb_linearize(skb)))
				goto free_out;
		}

		return sk_receive_skb(sk, skb, 0);
	}

	return dn_nsp_no_socket(skb, reason);

free_out:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * Entry point from the DECnet routing layer: run the packet through
 * the NF_DN_LOCAL_IN netfilter hook before NSP processing proper.
 */
int dn_nsp_rx(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL,
		       dn_nsp_rx_packet);
}
/*
* This is the main receive routine for sockets. It is called
* from the above when the socket is not busy, and also from
* sock_release() when there is a backlog queued up.
*/
/*
 * Per-socket NSP dispatch, called with the socket lock held (either
 * directly from the rx path or when the backlog is drained). Routes
 * the message to the matching handler; every path consumes the skb.
 * Always returns NET_RX_SUCCESS.
 */
int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	/* Returned (bounced) packet: only our own conninit matters. */
	if (cb->rt_flags & DN_RT_F_RTS) {
		if (cb->nsp_flags == 0x18 || cb->nsp_flags == 0x68)
			dn_returned_conn_init(sk, skb);
		else
			kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/*
	 * Control packet.
	 */
	if ((cb->nsp_flags & 0x0c) == 0x08) {
		switch(cb->nsp_flags & 0x70) {
		case 0x10:
		case 0x60:
			dn_nsp_conn_init(sk, skb);
			break;
		case 0x20:
			dn_nsp_conn_conf(sk, skb);
			break;
		case 0x30:
			dn_nsp_disc_init(sk, skb);
			break;
		case 0x40:
			dn_nsp_disc_conf(sk, skb);
			break;
		}
	} else if (cb->nsp_flags == 0x24) {
		/*
		 * Special for connacks, 'cos they don't have
		 * ack data or ack otherdata info.
		 */
		dn_nsp_conn_ack(sk, skb);
	} else {
		int other = 1;

		/* both data and ack frames can kick a CC socket into RUN */
		if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) {
			scp->state = DN_RUN;
			sk->sk_state = TCP_ESTABLISHED;
			sk->sk_state_change(sk);
		}

		/* other=0 (data channel) for plain data segments (low
		 * flag bits clear) and for type 0x04. */
		if ((cb->nsp_flags & 0x1c) == 0)
			other = 0;
		if (cb->nsp_flags == 0x04)
			other = 0;

		/*
		 * Read out ack data here, this applies equally
		 * to data, other data, link serivce and both
		 * ack data and ack otherdata.
		 */
		dn_process_ack(sk, skb, other);

		/*
		 * If we've some sort of data here then call a
		 * suitable routine for dealing with it, otherwise
		 * the packet is an ack and can be discarded.
		 */
		if ((cb->nsp_flags & 0x0c) == 0) {
			/* Data only accepted on a running connection. */
			if (scp->state != DN_RUN)
				goto free_out;

			switch(cb->nsp_flags) {
			case 0x10: /* LS */
				dn_nsp_linkservice(sk, skb);
				break;
			case 0x30: /* OD */
				dn_nsp_otherdata(sk, skb);
				break;
			default:
				dn_nsp_data(sk, skb);
			}
		} else { /* Ack, chuck it out here */
free_out:
			kfree_skb(skb);
		}
	}

	return NET_RX_SUCCESS;
}
| gpl-2.0 |
wimpknocker/android_kernel_samsung_viennalte | arch/arm/plat-s5p/irq-eint.c | 4579 | 5259 | /* linux/arch/arm/plat-s5p/irq-eint.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P - IRQ EINT support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <asm/hardware/vic.h>
#include <plat/regs-irqtype.h>
#include <mach/map.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/gpio-cfg.h>
#include <mach/regs-gpio.h>
/*
 * Mask one external interrupt by setting its bit in the EINT mask
 * register that covers data->irq.
 */
static inline void s5p_irq_eint_mask(struct irq_data *data)
{
	u32 val = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));

	val |= eint_irq_to_bit(data->irq);
	__raw_writel(val, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
/*
 * Unmask one external interrupt by clearing its bit in the EINT mask
 * register that covers data->irq.
 */
static void s5p_irq_eint_unmask(struct irq_data *data)
{
	u32 val = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));

	val &= ~(eint_irq_to_bit(data->irq));
	__raw_writel(val, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
/* Ack an EINT by writing its bit to the (write-to-clear) pend register. */
static inline void s5p_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
/* Combined mask+ack, as required by the irq_chip .irq_mask_ack hook. */
static void s5p_irq_eint_maskack(struct irq_data *data)
{
	/* compiler should in-line these */
	s5p_irq_eint_mask(data);
	s5p_irq_eint_ack(data);
}
/*
 * Translate a generic IRQ trigger type into the S5P external-interrupt
 * trigger encoding, program it into the EINT_CON register covering
 * data->irq, and switch the pad into external interrupt mode.
 *
 * Returns 0 on success or -EINVAL for an unsupported trigger type.
 */
static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		/* Fix: message previously lacked the trailing newline,
		 * corrupting the kernel log stream. */
		printk(KERN_ERR "No such irq type %d\n", type);
		return -EINVAL;
	}

	/* Each EINT_CON register holds eight 4-bit trigger fields. */
	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));

	/* Route the pad of this EINT line to external interrupt mode. */
	if ((0 <= offs) && (offs < 8))
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
	else if ((8 <= offs) && (offs < 16))
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
	else if ((16 <= offs) && (offs < 24))
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
	else if ((24 <= offs) && (offs < 32))
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
	else
		printk(KERN_ERR "No such irq number %d\n", offs);

	return 0;
}
/*
 * irq_chip for EINT16..31: these lines are not routed through the VIC
 * and are demuxed by the chained handler registered below.
 */
static struct irq_chip s5p_irq_eint = {
	.name = "s5p-eint",
	.irq_mask = s5p_irq_eint_mask,
	.irq_unmask = s5p_irq_eint_unmask,
	.irq_mask_ack = s5p_irq_eint_maskack,
	.irq_ack = s5p_irq_eint_ack,
	.irq_set_type = s5p_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake = s3c_irqext_wake,
#endif
};
/* s5p_irq_demux_eint
*
* This function demuxes the IRQ from the group0 external interrupts,
* from EINTs 16 to 31. It is designed to be inlined into the specific
* handler s5p_irq_demux_eintX_Y.
*
* Each EINT pend/mask registers handle eight of them.
*/
/*
 * Demux one group of eight external interrupts (starting at @start):
 * read the pending bits, discard the masked ones, and hand each
 * remaining source to the generic IRQ layer, highest bit first.
 */
static inline void s5p_irq_demux_eint(unsigned int start)
{
	u32 pend = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 masked = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
	unsigned int bit;

	/* Keep only unmasked, pending sources within this group. */
	pend &= ~masked;
	pend &= 0xff;

	while (pend) {
		bit = fls(pend) - 1;
		generic_handle_irq(start + bit);
		pend &= ~(1 << bit);
	}
}
/*
 * Chained handler for the combined EINT16_31 interrupt: demux both
 * 8-line groups. The irq/desc arguments are unused.
 */
static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	s5p_irq_demux_eint(IRQ_EINT(16));
	s5p_irq_demux_eint(IRQ_EINT(24));
}
/*
 * Mask an EINT0..15 line: these are routed through the VIC, so both
 * the EINT mask register and the VIC enable-clear must be written.
 * The VIC base address is stashed as the irq chip data.
 */
static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
{
	void __iomem *base = irq_data_get_irq_chip_data(data);

	s5p_irq_eint_mask(data);
	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
}
/* Unmask an EINT0..15 line in both the EINT mask register and the VIC. */
static void s5p_irq_vic_eint_unmask(struct irq_data *data)
{
	void __iomem *base = irq_data_get_irq_chip_data(data);

	s5p_irq_eint_unmask(data);
	writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
}
/* Ack an EINT0..15 line by clearing its (write-to-clear) pend bit. */
static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
/* Combined mask+ack for the VIC-routed EINT lines. */
static void s5p_irq_vic_eint_maskack(struct irq_data *data)
{
	s5p_irq_vic_eint_mask(data);
	s5p_irq_vic_eint_ack(data);
}
/* irq_chip for EINT0..15, which are delivered through the VIC. */
static struct irq_chip s5p_irq_vic_eint = {
	.name = "s5p_vic_eint",
	.irq_mask = s5p_irq_vic_eint_mask,
	.irq_unmask = s5p_irq_vic_eint_unmask,
	.irq_mask_ack = s5p_irq_vic_eint_maskack,
	.irq_ack = s5p_irq_vic_eint_ack,
	.irq_set_type = s5p_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake = s3c_irqext_wake,
#endif
};
/*
 * Register irq_chips for all 32 external interrupts: EINT0..15 are
 * delivered via the VIC (chip only), EINT16..31 get the standalone
 * chip, a level-IRQ flow handler, and the chained demux handler on
 * the combined IRQ_EINT16_31 line.
 */
static int __init s5p_init_irq_eint(void)
{
	int irq;

	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
		irq_set_chip(irq, &s5p_irq_vic_eint);

	for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
		irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
	return 0;
}
arch_initcall(s5p_init_irq_eint);
| gpl-2.0 |
SaberMod/lge-kernel-mako | drivers/usb/host/ohci-omap3.c | 5091 | 6009 | /*
* ohci-omap3.c - driver for OHCI on OMAP3 and later processors
*
* Bus Glue for OMAP3 USBHOST 3 port OHCI controller
* This controller is also used in later OMAPs and AM35x chips
*
* Copyright (C) 2007-2010 Texas Instruments, Inc.
* Author: Vikram Pandita <vikram.pandita@ti.com>
* Author: Anand Gadiyar <gadiyar@ti.com>
* Author: Keshava Munegowda <keshava_mgowda@ti.com>
*
* Based on ehci-omap.c and some other ohci glue layers
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* TODO (last updated Feb 27, 2011):
* - add kernel-doc
*/
#include <linux/platform_device.h>
#include <plat/usb.h>
#include <linux/pm_runtime.h>
/*-------------------------------------------------------------------------*/
/* HCD .reset hook: generic OHCI initialization for this controller. */
static int ohci_omap3_init(struct usb_hcd *hcd)
{
	dev_dbg(hcd->self.controller, "starting OHCI controller\n");

	return ohci_init(hcd_to_ohci(hcd));
}
/*-------------------------------------------------------------------------*/
/*
 * HCD .start hook. RemoteWakeupConnected must be set in hc_control
 * (and mirrored to the control register) before ohci_run(), because
 * the hardware reset value of RWC is 0.
 */
static int ohci_omap3_start(struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	int ret;

	ohci->hc_control = OHCI_CTRL_RWC;
	writel(OHCI_CTRL_RWC, &ohci->regs->control);

	ret = ohci_run(ohci);
	if (ret >= 0)
		return ret;

	dev_err(hcd->self.controller, "can't start\n");
	ohci_stop(hcd);
	return ret;
}
/*-------------------------------------------------------------------------*/
/* hc_driver hooks: generic OHCI handlers plus the two OMAP3-specific
 * lifecycle callbacks defined above. */
static const struct hc_driver ohci_omap3_hc_driver = {
	.description = hcd_name,
	.product_desc = "OMAP3 OHCI Host Controller",
	.hcd_priv_size = sizeof(struct ohci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq = ohci_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.reset = ohci_omap3_init,
	.start = ohci_omap3_start,
	.stop = ohci_stop,
	.shutdown = ohci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = ohci_urb_enqueue,
	.urb_dequeue = ohci_urb_dequeue,
	.endpoint_disable = ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number = ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data = ohci_hub_status_data,
	.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
	.bus_suspend = ohci_bus_suspend,
	.bus_resume = ohci_bus_resume,
#endif
	.start_port_reset = ohci_start_port_reset,
};
/*-------------------------------------------------------------------------*/
/*
* configure so an HC device and id are always provided
* always called with process context; sleeping is OK
*/
/**
* ohci_hcd_omap3_probe - initialize OMAP-based HCDs
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usb_hcd *hcd = NULL;
	void __iomem *regs = NULL;
	struct resource *res;
	int ret = -ENODEV;
	int irq;

	if (usb_disabled())
		return -ENODEV;

	/* The UHH parent device is required; bail out without it. */
	if (!dev->parent) {
		dev_err(dev, "Missing parent device\n");
		return -ENODEV;
	}

	irq = platform_get_irq_byname(pdev, "ohci-irq");
	if (irq < 0) {
		dev_err(dev, "OHCI irq failed\n");
		return -ENODEV;
	}

	res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, "ohci");
	if (!res) {
		dev_err(dev, "UHH OHCI get resource failed\n");
		return -ENOMEM;
	}

	regs = ioremap(res->start, resource_size(res));
	if (!regs) {
		dev_err(dev, "UHH OHCI ioremap failed\n");
		return -ENOMEM;
	}

	hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
			dev_name(dev));
	if (!hcd) {
		dev_err(dev, "usb_create_hcd failed\n");
		goto err_io;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = regs;

	/* Power up the controller via runtime PM before touching it.
	 * NOTE(review): pm_runtime_enable() is not undone on the error
	 * path below - confirm this is intentional. */
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ohci_hcd_init(hcd_to_ohci(hcd));

	ret = usb_add_hcd(hcd, irq, 0);
	if (ret) {
		dev_dbg(dev, "failed to add hcd with err %d\n", ret);
		goto err_add_hcd;
	}

	return 0;

	/* err_add_hcd deliberately falls through into err_io so the
	 * register mapping is released on that path too. */
err_add_hcd:
	pm_runtime_put_sync(dev);
	usb_put_hcd(hcd);
err_io:
	iounmap(regs);
	return ret;
}
/*
* may be called without controller electrically present
* may be called with controller, bus, and devices active
*/
/**
* ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs
* @pdev: USB Host Controller being removed
*
* Reverses the effect of ohci_hcd_omap3_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*/
/*
 * Tear down the HCD created by ohci_hcd_omap3_probe().
 *
 * Fix: remove the HCD *before* tearing down its register mapping.
 * usb_remove_hcd() stops the controller and still accesses hcd->regs,
 * so calling iounmap() first (as the original code did) accesses
 * unmapped I/O memory.
 */
static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	usb_put_hcd(hcd);
	return 0;
}
static void ohci_hcd_omap3_shutdown(struct platform_device *pdev)
{
struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
/* Platform driver glue for the "ohci-omap3" device. */
static struct platform_driver ohci_hcd_omap3_driver = {
	.probe = ohci_hcd_omap3_probe,
	.remove = __devexit_p(ohci_hcd_omap3_remove),
	.shutdown = ohci_hcd_omap3_shutdown,
	.driver = {
		.name = "ohci-omap3",
	},
};
MODULE_ALIAS("platform:ohci-omap3");
MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
| gpl-2.0 |
NamelessRom/android_kernel_xiaomi_armani | drivers/staging/omapdrm/omap_debugfs.c | 5091 | 3466 | /*
* drivers/staging/omapdrm/omap_debugfs.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
#include "drm_fb_helper.h"
#ifdef CONFIG_DEBUG_FS
/*
 * debugfs "gem" file: dump a description of every GEM object known to
 * the device, under struct_mutex.
 */
static int gem_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct omap_drm_private *priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "All Objects:\n");
	omap_gem_describe_objects(&priv->obj_list, m);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
/* debugfs "mm" file: dump the drm_mm allocator state for the device. */
static int mm_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;

	return drm_mm_dump_table(m, node->minor->dev->mm_private);
}
/*
 * debugfs "fb" file: describe the fbcon framebuffer followed by every
 * user-created framebuffer. Takes mode_config.mutex (protects fb_list)
 * and then struct_mutex (protects GEM state), in that order.
 */
static int fb_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret) {
		mutex_unlock(&dev->mode_config.mutex);
		return ret;
	}

	seq_printf(m, "fbcon ");
	omap_framebuffer_describe(priv->fbdev->fb, m);

	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		/* The fbcon framebuffer was already printed above. */
		if (fb == priv->fbdev->fb)
			continue;

		seq_printf(m, "user ");
		omap_framebuffer_describe(fb, m);
	}

	/* Release in reverse acquisition order. */
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
/* list of debugfs files that are applicable to all devices */
static struct drm_info_list omap_debugfs_list[] = {
	{"gem", gem_show, 0},
	{"mm", mm_show, 0},
	{"fb", fb_show, 0},
};
/* list of debugfs files that are specific to devices with dmm/tiler;
 * only installed when dmm_is_available() reports the hardware present */
static struct drm_info_list omap_dmm_debugfs_list[] = {
	{"tiler_map", tiler_map_show, 0},
};
/*
 * Install the omapdrm debugfs files for a DRM minor: the common set,
 * plus the tiler map when DMM hardware is present.
 *
 * Returns 0 on success or the error from drm_debugfs_create_files().
 *
 * Fix: the original unbraced "if (dmm_is_available())" guarded only
 * the drm_debugfs_create_files() call; the following error check ran
 * unconditionally (harmless only because ret happened to be 0 at that
 * point). Brace the whole DMM sequence so the check unambiguously
 * belongs to the DMM file creation.
 */
int omap_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(omap_debugfs_list,
			ARRAY_SIZE(omap_debugfs_list),
			minor->debugfs_root, minor);
	if (ret) {
		dev_err(dev->dev, "could not install omap_debugfs_list\n");
		return ret;
	}

	if (dmm_is_available()) {
		ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
				ARRAY_SIZE(omap_dmm_debugfs_list),
				minor->debugfs_root, minor);
		if (ret) {
			dev_err(dev->dev,
				"could not install omap_dmm_debugfs_list\n");
			return ret;
		}
	}

	return ret;
}
/*
 * Remove the debugfs files installed by omap_debugfs_init() for this
 * minor, including the DMM-specific ones when present.
 */
void omap_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(omap_debugfs_list,
			ARRAY_SIZE(omap_debugfs_list), minor);
	if (dmm_is_available())
		drm_debugfs_remove_files(omap_dmm_debugfs_list,
				ARRAY_SIZE(omap_dmm_debugfs_list), minor);
}
#endif
| gpl-2.0 |
tidatida/linux-stable-grsec | fs/freevxfs/vxfs_fshead.c | 12515 | 5527 | /*
* Copyright (c) 2000-2001 Christoph Hellwig.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Veritas filesystem driver - fileset header routines.
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "vxfs.h"
#include "vxfs_inode.h"
#include "vxfs_extern.h"
#include "vxfs_fshead.h"
#ifdef DIAGNOSTIC
/* DIAGNOSTIC-only helper: dump every fileset header field to the log. */
static void
vxfs_dumpfsh(struct vxfs_fsh *fhp)
{
	printk("\n\ndumping fileset header:\n");
	printk("----------------------------\n");
	printk("version: %u\n", fhp->fsh_version);
	printk("fsindex: %u\n", fhp->fsh_fsindex);
	printk("iauino: %u\tninodes:%u\n",
	       fhp->fsh_iauino, fhp->fsh_ninodes);
	printk("maxinode: %u\tlctino: %u\n",
	       fhp->fsh_maxinode, fhp->fsh_lctino);
	printk("nau: %u\n", fhp->fsh_nau);
	printk("ilistino[0]: %u\tilistino[1]: %u\n",
	       fhp->fsh_ilistino[0], fhp->fsh_ilistino[1]);
}
#endif
/**
* vxfs_getfsh - read fileset header into memory
* @ip: the (fake) fileset header inode
* @which: 0 for the structural, 1 for the primary fsh.
*
* Description:
* vxfs_getfsh reads either the structural or primary fileset header
* described by @ip into memory.
*
* Returns:
* The fileset header structure on success, else Zero.
*/
/* Read the fileset header block @which via @ip into a fresh kmalloc'd
 * copy; the caller owns (and must kfree) the result. NULL on failure. */
static struct vxfs_fsh *
vxfs_getfsh(struct inode *ip, int which)
{
	struct buffer_head *bp = vxfs_bread(ip, which);
	struct vxfs_fsh *fhp = NULL;

	if (!bp)
		goto fail;

	fhp = kmalloc(sizeof(*fhp), GFP_KERNEL);
	if (!fhp)
		goto fail;

	memcpy(fhp, bp->b_data, sizeof(*fhp));
	put_bh(bp);
	return fhp;

fail:
	/* brelse() tolerates a NULL buffer_head. */
	brelse(bp);
	return NULL;
}
/**
* vxfs_read_fshead - read the fileset headers
* @sbp: superblock to which the fileset belongs
*
* Description:
* vxfs_read_fshead will fill the inode and structural inode list in @sb.
*
* Returns:
* Zero on success, else a negative error code (-EINVAL).
*/
int
vxfs_read_fshead(struct super_block *sbp)
{
	struct vxfs_sb_info *infp = VXFS_SBI(sbp);
	struct vxfs_fsh *pfp, *sfp;
	struct vxfs_inode_info *vip, *tip;

	/* Read the fileset header inode itself. */
	vip = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino);
	if (!vip) {
		printk(KERN_ERR "vxfs: unable to read fsh inode\n");
		return -EINVAL;
	}
	if (!VXFS_ISFSH(vip)) {
		printk(KERN_ERR "vxfs: fsh list inode is of wrong type (%x)\n",
		       vip->vii_mode & VXFS_TYPE_MASK);
		goto out_free_fship;
	}

#ifdef DIAGNOSTIC
	printk("vxfs: fsh inode dump:\n");
	vxfs_dumpi(vip, infp->vsi_fshino);
#endif

	/* On success the fake inode takes ownership of vip. */
	infp->vsi_fship = vxfs_get_fake_inode(sbp, vip);
	if (!infp->vsi_fship) {
		printk(KERN_ERR "vxfs: unable to get fsh inode\n");
		goto out_free_fship;
	}

	/* Structural (0) and primary (1) fileset headers.
	 * NOTE(review): sfp/pfp are only kfree'd on the error paths
	 * below, not on success - confirm whether this is a leak. */
	sfp = vxfs_getfsh(infp->vsi_fship, 0);
	if (!sfp) {
		printk(KERN_ERR "vxfs: unable to get structural fsh\n");
		goto out_iput_fship;
	}

#ifdef DIAGNOSTIC
	vxfs_dumpfsh(sfp);
#endif

	pfp = vxfs_getfsh(infp->vsi_fship, 1);
	if (!pfp) {
		printk(KERN_ERR "vxfs: unable to get primary fsh\n");
		goto out_free_sfp;
	}

#ifdef DIAGNOSTIC
	vxfs_dumpfsh(pfp);
#endif

	/* Structural inode list for the structural fileset. */
	tip = vxfs_blkiget(sbp, infp->vsi_iext, sfp->fsh_ilistino[0]);
	if (!tip)
		goto out_free_pfp;

	infp->vsi_stilist = vxfs_get_fake_inode(sbp, tip);
	if (!infp->vsi_stilist) {
		printk(KERN_ERR "vxfs: unable to get structural list inode\n");
		kfree(tip);
		goto out_free_pfp;
	}
	if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) {
		printk(KERN_ERR "vxfs: structural list inode is of wrong type (%x)\n",
		       VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK);
		goto out_iput_stilist;
	}

	/* Inode list for the primary fileset. */
	tip = vxfs_stiget(sbp, pfp->fsh_ilistino[0]);
	if (!tip)
		goto out_iput_stilist;
	infp->vsi_ilist = vxfs_get_fake_inode(sbp, tip);
	if (!infp->vsi_ilist) {
		printk(KERN_ERR "vxfs: unable to get inode list inode\n");
		kfree(tip);
		goto out_iput_stilist;
	}
	if (!VXFS_ISILT(VXFS_INO(infp->vsi_ilist))) {
		printk(KERN_ERR "vxfs: inode list inode is of wrong type (%x)\n",
		       VXFS_INO(infp->vsi_ilist)->vii_mode & VXFS_TYPE_MASK);
		goto out_iput_ilist;
	}

	return 0;

	/* Error unwind: the labels cascade, releasing resources in
	 * reverse order of acquisition. */
out_iput_ilist:
	iput(infp->vsi_ilist);
out_iput_stilist:
	iput(infp->vsi_stilist);
out_free_pfp:
	kfree(pfp);
out_free_sfp:
	kfree(sfp);
out_iput_fship:
	iput(infp->vsi_fship);
	return -EINVAL;

	/* Separate path: vip was never handed to a fake inode. */
out_free_fship:
	kfree(vip);
	return -EINVAL;
}
| gpl-2.0 |
cdesjardins/DTS-Eagle-Integration_CAF-Android-kernel | fs/freevxfs/vxfs_fshead.c | 12515 | 5527 | /*
* Copyright (c) 2000-2001 Christoph Hellwig.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Veritas filesystem driver - fileset header routines.
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "vxfs.h"
#include "vxfs_inode.h"
#include "vxfs_extern.h"
#include "vxfs_fshead.h"
#ifdef DIAGNOSTIC
/*
 * vxfs_dumpfsh - dump a fileset header's fields to the kernel log.
 * Debug-only helper, compiled in only when DIAGNOSTIC is defined.
 */
static void
vxfs_dumpfsh(struct vxfs_fsh *fhp)
{
	printk("\n\ndumping fileset header:\n");
	printk("----------------------------\n");
	printk("version: %u\n", fhp->fsh_version);
	printk("fsindex: %u\n", fhp->fsh_fsindex);
	printk("iauino: %u\tninodes:%u\n",
	       fhp->fsh_iauino, fhp->fsh_ninodes);
	printk("maxinode: %u\tlctino: %u\n",
	       fhp->fsh_maxinode, fhp->fsh_lctino);
	printk("nau: %u\n", fhp->fsh_nau);
	printk("ilistino[0]: %u\tilistino[1]: %u\n",
	       fhp->fsh_ilistino[0], fhp->fsh_ilistino[1]);
}
#endif
/**
 * vxfs_getfsh - read fileset header into memory
 * @ip:		the (fake) fileset header inode
 * @which:	0 for the structural, 1 for the primary fsh.
 *
 * Description:
 *   Reads the requested fileset header block through @ip and returns a
 *   freshly allocated in-memory copy of it.
 *
 * Returns:
 *   Pointer to the copied fileset header on success, else NULL.  The
 *   caller owns the returned memory and must kfree() it.
 */
static struct vxfs_fsh *
vxfs_getfsh(struct inode *ip, int which)
{
	struct vxfs_fsh *fhp = NULL;
	struct buffer_head *bp;

	bp = vxfs_bread(ip, which);
	if (!bp)
		return NULL;

	fhp = kmalloc(sizeof(*fhp), GFP_KERNEL);
	if (fhp)
		memcpy(fhp, bp->b_data, sizeof(*fhp));

	brelse(bp);
	return fhp;
}
/**
* vxfs_read_fshead - read the fileset headers
* @sbp: superblock to which the fileset belongs
*
* Description:
* vxfs_read_fshead will fill the inode and structural inode list in @sb.
*
* Returns:
* Zero on success, else a negative error code (-EINVAL).
*/
int
vxfs_read_fshead(struct super_block *sbp)
{
	struct vxfs_sb_info *infp = VXFS_SBI(sbp);
	struct vxfs_fsh *pfp, *sfp;	/* primary / structural fsh copies */
	struct vxfs_inode_info *vip, *tip;

	/* Read the on-disk inode describing the fileset header file. */
	vip = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino);
	if (!vip) {
		printk(KERN_ERR "vxfs: unable to read fsh inode\n");
		return -EINVAL;
	}
	if (!VXFS_ISFSH(vip)) {
		printk(KERN_ERR "vxfs: fsh list inode is of wrong type (%x)\n",
		       vip->vii_mode & VXFS_TYPE_MASK);
		goto out_free_fship;
	}

#ifdef DIAGNOSTIC
	printk("vxfs: fsh inode dump:\n");
	vxfs_dumpi(vip, infp->vsi_fshino);
#endif

	/*
	 * Wrap vip in a fake VFS inode; on success the inode takes
	 * ownership of vip, so later error paths iput() instead of kfree().
	 */
	infp->vsi_fship = vxfs_get_fake_inode(sbp, vip);
	if (!infp->vsi_fship) {
		printk(KERN_ERR "vxfs: unable to get fsh inode\n");
		goto out_free_fship;
	}

	/* Structural fileset header (block 0 of the fsh file). */
	sfp = vxfs_getfsh(infp->vsi_fship, 0);
	if (!sfp) {
		printk(KERN_ERR "vxfs: unable to get structural fsh\n");
		goto out_iput_fship;
	}

#ifdef DIAGNOSTIC
	vxfs_dumpfsh(sfp);
#endif

	/* Primary fileset header (block 1 of the fsh file). */
	pfp = vxfs_getfsh(infp->vsi_fship, 1);
	if (!pfp) {
		printk(KERN_ERR "vxfs: unable to get primary fsh\n");
		goto out_free_sfp;
	}

#ifdef DIAGNOSTIC
	vxfs_dumpfsh(pfp);
#endif

	/* Structural inode list, located via the structural fsh. */
	tip = vxfs_blkiget(sbp, infp->vsi_iext, sfp->fsh_ilistino[0]);
	if (!tip)
		goto out_free_pfp;

	infp->vsi_stilist = vxfs_get_fake_inode(sbp, tip);
	if (!infp->vsi_stilist) {
		printk(KERN_ERR "vxfs: unable to get structural list inode\n");
		kfree(tip);	/* ownership was not transferred */
		goto out_free_pfp;
	}
	if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) {
		printk(KERN_ERR "vxfs: structural list inode is of wrong type (%x)\n",
		       VXFS_INO(infp->vsi_stilist)->vii_mode & VXFS_TYPE_MASK);
		goto out_iput_stilist;
	}

	/* Primary inode list, located via the primary fsh. */
	tip = vxfs_stiget(sbp, pfp->fsh_ilistino[0]);
	if (!tip)
		goto out_iput_stilist;

	infp->vsi_ilist = vxfs_get_fake_inode(sbp, tip);
	if (!infp->vsi_ilist) {
		printk(KERN_ERR "vxfs: unable to get inode list inode\n");
		kfree(tip);	/* ownership was not transferred */
		goto out_iput_stilist;
	}
	if (!VXFS_ISILT(VXFS_INO(infp->vsi_ilist))) {
		printk(KERN_ERR "vxfs: inode list inode is of wrong type (%x)\n",
		       VXFS_INO(infp->vsi_ilist)->vii_mode & VXFS_TYPE_MASK);
		goto out_iput_ilist;
	}

	return 0;

	/* Error unwinding: release resources in reverse acquisition order. */
 out_iput_ilist:
 	iput(infp->vsi_ilist);
 out_iput_stilist:
 	iput(infp->vsi_stilist);
 out_free_pfp:
	kfree(pfp);
 out_free_sfp:
	kfree(sfp);
 out_iput_fship:
	iput(infp->vsi_fship);
	return -EINVAL;
 out_free_fship:
 	kfree(vip);	/* vip never handed to a fake inode on this path */
	return -EINVAL;
}
| gpl-2.0 |
sandy-harris/random.gcm | drivers/net/arcnet/arc-rawmode.c | 13283 | 5295 | /*
* Linux ARCnet driver - "raw mode" packet encapsulation (no soft headers)
*
* Written 1994-1999 by Avery Pennarun.
* Derived from skeleton.c by Donald Becker.
*
* Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
* for sponsoring the further development of this driver.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/arcdevice.h>
#define VERSION "arcnet: raw mode (`r') encapsulation support loaded.\n"
static void rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length);
static int build_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, uint8_t daddr);
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
int bufnum);
/*
 * Protocol descriptor for raw-mode encapsulation: suffix 'r', full XMTU,
 * and no continue_tx/ack_tx handlers (nothing to continue or ack without
 * soft headers).
 */
static struct ArcProto rawmode_proto =
{
	.suffix		= 'r',
	.mtu		= XMTU,
	.rx		= rx,
	.build_header	= build_header,
	.prepare_tx	= prepare_tx,
	.continue_tx	= NULL,
	.ack_tx		= NULL
};
/*
 * Module init: install raw mode as the handler for every protocol id
 * still bound to the default, then make it the new default.
 */
static int __init arcnet_raw_init(void)
{
	int i;

	printk(VERSION);

	for (i = 0; i < 256; i++) {
		if (arc_proto_map[i] == arc_proto_default)
			arc_proto_map[i] = &rawmode_proto;
	}

	/* for raw mode, we only set the bcast proto if there's no better one */
	if (arc_bcast_proto == arc_proto_default)
		arc_bcast_proto = &rawmode_proto;

	arc_proto_default = &rawmode_proto;
	return 0;
}
/* Module exit: detach the raw-mode protocol from the ARCnet core. */
static void __exit arcnet_raw_exit(void)
{
	arcnet_unregister_proto(&rawmode_proto);
}

module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);
MODULE_LICENSE("GPL");
/*
 * Packet receiver: build an skb from the already-copied archdr plus any
 * remaining soft payload still sitting in card buffer @bufnum, then hand
 * it to the network stack.
 */
static void rx(struct net_device *dev, int bufnum,
	       struct archdr *pkthdr, int length)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	struct archdr *pkt;
	int ofs;

	BUGMSG(D_DURING, "it's a raw packet (length=%d)\n", length);

	/* payload sits at the end of a 256- or 512-byte card buffer */
	ofs = (length > MTU ? 512 : 256) - length;

	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
	if (!skb) {
		BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
		dev->stats.rx_dropped++;
		return;
	}
	skb_put(skb, length + ARC_HDR_SIZE);
	skb->dev = dev;

	pkt = (struct archdr *) skb->data;
	skb_reset_mac_header(skb);
	skb_pull(skb, ARC_HDR_SIZE);

	/* up to sizeof(pkt->soft) has already been copied from the card */
	memcpy(pkt, pkthdr, sizeof(struct archdr));
	if (length > sizeof(pkt->soft))
		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
				      pkt->soft.raw + sizeof(pkt->soft),
				      length - sizeof(pkt->soft));

	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");

	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
	netif_rx(skb);
}
/*
 * Create the ARCnet hard/soft headers for raw mode.
 * There aren't any soft headers in raw mode - not even the protocol id.
 */
static int build_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type, uint8_t daddr)
{
	const int hdr_size = ARC_HDR_SIZE;
	struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);

	/*
	 * Set the source hardware address.  Mostly useful for debugging:
	 * ARCnet does not allow us to change the source address in the
	 * actual packet sent.
	 */
	pkt->hard.source = *dev->dev_addr;

	/* see linux/net/ethernet/eth.c to see where I got the following */
	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
		/*
		 * FIXME: fill in the last byte of the dest ipaddr here to
		 * better comply with RFC1051 in "noarp" mode.
		 */
		pkt->hard.dest = 0;
	} else {
		/* otherwise, just fill it in and go! */
		pkt->hard.dest = daddr;
	}

	return hdr_size;	/* success */
}
/*
 * prepare_tx - copy an outgoing raw-mode packet into card buffer @bufnum.
 *
 * @length includes the hard header on entry; it is reduced to the soft
 * payload length before the offset calculation.  The payload is placed at
 * the tail of the card buffer (512 or 256 bytes depending on size), with
 * hard->offset[] recording where it starts.  Always returns 1 (done).
 */
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
		      int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct arc_hardware *hard = &pkt->hard;
	int ofs;

	BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
	       lp->next_tx, lp->cur_tx, bufnum);

	length -= ARC_HDR_SIZE;	/* hard header is not included in packet length */

	if (length > XMTU) {
		/* should never happen! other people already check for this. */
		BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n",
		       length, XMTU);
		length = XMTU;
	}
	if (length >= MinTU) {
		/* long frame: payload at tail of 512-byte buffer */
		hard->offset[0] = 0;
		hard->offset[1] = ofs = 512 - length;
	} else if (length > MTU) {
		/* mid-size frame: 3 extra bytes reserved before the tail
		 * (sizes between MTU and MinTU need this padding) */
		hard->offset[0] = 0;
		hard->offset[1] = ofs = 512 - length - 3;
	} else
		/* short frame: payload at tail of 256-byte buffer */
		hard->offset[0] = ofs = 256 - length;

	BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
	       length,ofs);

	/* hard header at the start of the buffer, payload at its offset */
	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);

	lp->lastload_dest = hard->dest;

	return 1;		/* done */
}
| gpl-2.0 |
riverzhou/kernel-c8500 | arch/mips/lasat/lasat_board.c | 14051 | 7154 | /*
* Thomas Horsten <thh@lasat.com>
* Copyright (C) 2000 LASAT Networks A/S.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Routines specific to the LASAT boards
*/
#include <linux/types.h>
#include <linux/crc32.h>
#include <asm/lasat/lasat.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <asm/addrspace.h>
#include "at93c.h"
/* New model description table */
#include "lasat_models.h"
/* Serializes EEPROM writeback (see lasat_write_eeprom_info). */
static DEFINE_MUTEX(lasat_eeprom_mutex);

/* CRC32 variant used over the EEPROM contents (note the double inversion). */
#define EEPROM_CRC(data, len) (~crc32(~0, data, len))

/* Board description, filled in from EEPROM by lasat_init_board_info(). */
struct lasat_info lasat_board_info;
/* Read @len bytes from the AT93C EEPROM starting at @pos into @data.
 * Always returns 0. */
int EEPROMRead(unsigned int pos, unsigned char *data, int len)
{
	while (len-- > 0)
		*data++ = at93c_read(pos++);

	return 0;
}
/* Write @len bytes from @data to the AT93C EEPROM starting at @pos.
 * Always returns 0. */
int EEPROMWrite(unsigned int pos, unsigned char *data, int len)
{
	while (len-- > 0)
		at93c_write(pos++, *data++);

	return 0;
}
/*
 * init_flash_sizes - lay out the flash partition table.
 *
 * Fills lasat_board_info.li_flashpart_base/size according to board family
 * (IS_LASAT_200() or not) and the detected flash size; partitions after
 * the bootloader are then packed back-to-back.
 */
static void init_flash_sizes(void)
{
	unsigned long *lb = lasat_board_info.li_flashpart_base;
	unsigned long *ls = lasat_board_info.li_flashpart_size;
	int i;

	/* Fixed partition sizes common to all boards. */
	ls[LASAT_MTD_BOOTLOADER] = 0x40000;
	ls[LASAT_MTD_SERVICE] = 0xC0000;
	ls[LASAT_MTD_NORMAL] = 0x100000;

	if (!IS_LASAT_200()) {
		/* non-200 boards: flash window at 0x1e000000 */
		lasat_board_info.li_flash_base = 0x1e000000;

		lb[LASAT_MTD_BOOTLOADER] = 0x1e400000;

		if (lasat_board_info.li_flash_size > 0x200000) {
			ls[LASAT_MTD_CONFIG] = 0x100000;
			ls[LASAT_MTD_FS] = 0x500000;
		}
	} else {
		/* LASAT 200 boards: flash window at 0x10000000 */
		lasat_board_info.li_flash_base = 0x10000000;

		if (lasat_board_info.li_flash_size < 0x1000000) {
			lb[LASAT_MTD_BOOTLOADER] = 0x10000000;
			ls[LASAT_MTD_CONFIG] = 0x100000;
			if (lasat_board_info.li_flash_size >= 0x400000)
				ls[LASAT_MTD_FS] =
					lasat_board_info.li_flash_size - 0x300000;
		}
	}

	/* Each remaining partition starts where the previous one ends. */
	for (i = 1; i < LASAT_MTD_LAST; i++)
		lb[i] = lb[i-1] + ls[i-1];
}
/*
 * lasat_init_board_info - populate lasat_board_info from the EEPROM.
 *
 * Reads the EEPROM image, warns (but continues) on CRC/version/config
 * mismatches, then decodes the cfg words into memory size, bus/CPU
 * clocks, flash size, partition layout and model/product strings.
 * Always returns 0.
 */
int lasat_init_board_info(void)
{
	int c;
	unsigned long crc;
	unsigned long cfg0, cfg1;
	const struct product_info *ppi;
	int i_n_base_models = N_BASE_MODELS;
	const char * const * i_txt_base_models = txt_base_models;
	int i_n_prids = N_PRIDS;

	memset(&lasat_board_info, 0, sizeof(lasat_board_info));

	/* First read the EEPROM info */
	EEPROMRead(0, (unsigned char *)&lasat_board_info.li_eeprom_info,
		   sizeof(struct lasat_eeprom_struct));

	/* Check the CRC (covers everything except the trailing crc32 field) */
	crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info),
			 sizeof(struct lasat_eeprom_struct) - 4);

	if (crc != lasat_board_info.li_eeprom_info.crc32) {
		printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM CRC does "
		       "not match calculated, attempting to soldier on...\n");
	}

	if (lasat_board_info.li_eeprom_info.version != LASAT_EEPROM_VERSION) {
		printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM version "
		       "%d, wanted version %d, attempting to soldier on...\n",
		       (unsigned int)lasat_board_info.li_eeprom_info.version,
		       LASAT_EEPROM_VERSION);
	}

	cfg0 = lasat_board_info.li_eeprom_info.cfg[0];
	cfg1 = lasat_board_info.li_eeprom_info.cfg[1];

	if (LASAT_W0_DSCTYPE(cfg0) != 1) {
		printk(KERN_WARNING "WARNING...\nWARNING...\n"
		       "Invalid configuration read from EEPROM, attempting to "
		       "soldier on...");
	}
	/* We have a valid configuration */

	/* SDRAM bank size, encoded as a power-of-two selector. */
	switch (LASAT_W0_SDRAMBANKSZ(cfg0)) {
	case 0:
		lasat_board_info.li_memsize = 0x0800000;
		break;
	case 1:
		lasat_board_info.li_memsize = 0x1000000;
		break;
	case 2:
		lasat_board_info.li_memsize = 0x2000000;
		break;
	case 3:
		lasat_board_info.li_memsize = 0x4000000;
		break;
	case 4:
		lasat_board_info.li_memsize = 0x8000000;
		break;
	default:
		lasat_board_info.li_memsize = 0;
	}

	/* Double the total when a second SDRAM bank is fitted. */
	switch (LASAT_W0_SDRAMBANKS(cfg0)) {
	case 0:
		break;
	case 1:
		lasat_board_info.li_memsize *= 2;
		break;
	default:
		break;
	}

	/* System bus frequency selector. */
	switch (LASAT_W0_BUSSPEED(cfg0)) {
	case 0x0:
		lasat_board_info.li_bus_hz = 60000000;
		break;
	case 0x1:
		lasat_board_info.li_bus_hz = 66000000;
		break;
	case 0x2:
		lasat_board_info.li_bus_hz = 66666667;
		break;
	case 0x3:
		lasat_board_info.li_bus_hz = 80000000;
		break;
	case 0x4:
		lasat_board_info.li_bus_hz = 83333333;
		break;
	case 0x5:
		lasat_board_info.li_bus_hz = 100000000;
		break;
	}

	/* CPU clock as a multiple of the bus clock: 1x, 1.5x, 2x, 2.5x, 3x. */
	switch (LASAT_W0_CPUCLK(cfg0)) {
	case 0x0:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz;
		break;
	case 0x1:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			(lasat_board_info.li_bus_hz >> 1);
		break;
	case 0x2:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz;
		break;
	case 0x3:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz +
			(lasat_board_info.li_bus_hz >> 1);
		break;
	case 0x4:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz;
		break;
	}

	/* Flash size */
	switch (LASAT_W1_FLASHSIZE(cfg1)) {
	case 0:
		lasat_board_info.li_flash_size = 0x200000;
		break;
	case 1:
		lasat_board_info.li_flash_size = 0x400000;
		break;
	case 2:
		lasat_board_info.li_flash_size = 0x800000;
		break;
	case 3:
		lasat_board_info.li_flash_size = 0x1000000;
		break;
	case 4:
		lasat_board_info.li_flash_size = 0x2000000;
		break;
	}

	init_flash_sizes();

	lasat_board_info.li_bmid = LASAT_W0_BMID(cfg0);
	lasat_board_info.li_prid = lasat_board_info.li_eeprom_info.prid;
	/* Fall back to the base-model id when the product id is unset. */
	if (lasat_board_info.li_prid == 0xffff || lasat_board_info.li_prid == 0)
		lasat_board_info.li_prid = lasat_board_info.li_bmid;

	/* Base model stuff */
	if (lasat_board_info.li_bmid > i_n_base_models)
		lasat_board_info.li_bmid = i_n_base_models;
	strcpy(lasat_board_info.li_bmstr,
	       i_txt_base_models[lasat_board_info.li_bmid]);

	/* Product ID dependent values */
	c = lasat_board_info.li_prid;
	if (c >= i_n_prids) {
		strcpy(lasat_board_info.li_namestr, "Unknown Model");
		strcpy(lasat_board_info.li_typestr, "Unknown Type");
	} else {
		ppi = &vendor_info_table[0].vi_product_info[c];
		strcpy(lasat_board_info.li_namestr, ppi->pi_name);
		if (ppi->pi_type)
			strcpy(lasat_board_info.li_typestr, ppi->pi_type);
		else
			sprintf(lasat_board_info.li_typestr, "%d", 10 * c);
	}

	return 0;
}
/*
 * Recompute the CRC over lasat_board_info.li_eeprom_info and write the
 * whole structure back to the EEPROM, under the EEPROM mutex.
 */
void lasat_write_eeprom_info(void)
{
	mutex_lock(&lasat_eeprom_mutex);

	/* Refresh the checksum (covers everything but the crc32 field) */
	lasat_board_info.li_eeprom_info.crc32 =
		EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info),
			   sizeof(struct lasat_eeprom_struct) - 4);

	/* Write the EEPROM info */
	EEPROMWrite(0, (unsigned char *)&lasat_board_info.li_eeprom_info,
		    sizeof(struct lasat_eeprom_struct));

	mutex_unlock(&lasat_eeprom_mutex);
}
| gpl-2.0 |
m0zes/linux | drivers/staging/lustre/lustre/ptlrpc/niobuf.c | 228 | 22804 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_RPC
#include "../include/obd_support.h"
#include "../include/lustre_net.h"
#include "../include/lustre_lib.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "ptlrpc_internal.h"
/**
 * Helper function. Sends \a len bytes from \a base at offset \a offset
 * over \a conn connection to portal \a portal.
 * The MD handle for the transfer is returned through \a mdh; completion
 * is signalled via the \a cbid callback.
 * Returns 0 on success or error code.
 */
static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
			lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
			struct ptlrpc_connection *conn, int portal, __u64 xid,
			unsigned int offset)
{
	int rc;
	lnet_md_t md;

	LASSERT(portal != 0);
	LASSERT(conn != NULL);
	CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));

	/* Describe the buffer as an LNet memory descriptor; threshold is 2
	 * when an ack is requested (send event + ack event), else 1. */
	md.start = base;
	md.length = len;
	md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
	md.options = PTLRPC_MD_OPTIONS;
	md.user_ptr = cbid;
	md.eq_handle = ptlrpc_eq_h;

	if (unlikely(ack == LNET_ACK_REQ &&
		     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK,
					  OBD_FAIL_ONCE))) {
		/* don't ask for the ack to simulate failing client */
		ack = LNET_NOACK_REQ;
	}

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (unlikely(rc != 0)) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
	       len, portal, xid, offset);

	rc = LNetPut(conn->c_self, *mdh, ack,
		     conn->c_peer, portal, xid, offset, 0);
	if (unlikely(rc != 0)) {
		int rc2;
		/* We're going to get an UNLINK event when I unlink below,
		 * which will complete just like any other failed send, so
		 * I fall through and return success here! */
		CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
		       libcfs_id2str(conn->c_peer), portal, xid, rc);
		rc2 = LNetMDUnlink(*mdh);
		LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
	}

	return 0;
}
/* Unlink each of the @count bulk MD handles in @bd_mds. */
static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		LNetMDUnlink(bd_mds[idx]);
}
/**
 * Register bulk at the sender for later transfer.
 * Posts one LNet ME/MD pair per LNET_MAX_IOV-sized chunk of the bulk
 * descriptor, using consecutive match bits starting from an aligned XID.
 * Returns 0 on success or error code.
 */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
	struct ptlrpc_bulk_desc *desc = req->rq_bulk;
	lnet_process_id_t peer;
	int rc = 0;
	int rc2;
	int posted_md;
	int total_md;
	__u64 xid;
	lnet_handle_me_t me_h;
	lnet_md_t md;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
		return 0;

	/* NB no locking required until desc is on the network */
	LASSERT(desc->bd_nob > 0);
	LASSERT(desc->bd_md_count == 0);
	LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(desc->bd_req != NULL);
	LASSERT(desc->bd_type == BULK_PUT_SINK ||
		desc->bd_type == BULK_GET_SOURCE);

	/* cleanup the state of the bulk for it will be reused */
	if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
		desc->bd_nob_transferred = 0;
	else
		LASSERT(desc->bd_nob_transferred == 0);

	desc->bd_failure = 0;

	peer = desc->bd_import->imp_connection->c_peer;

	LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
	LASSERT(desc->bd_cbid.cbid_arg == desc);

	/* An XID is only used for a single request from the client.
	 * For retried bulk transfers, a new XID will be allocated in
	 * in ptlrpc_check_set() if it needs to be resent, so it is not
	 * using the same RDMA match bits after an error.
	 *
	 * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
	 * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
	xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
	LASSERTF(!(desc->bd_registered &&
		   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
		 xid != desc->bd_last_xid,
		 "registered: %d rq_xid: %llu bd_last_xid: %llu\n",
		 desc->bd_registered, xid, desc->bd_last_xid);

	/* One MD per LNET_MAX_IOV pages (rounded up). */
	total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
	desc->bd_registered = 1;
	desc->bd_last_xid = xid;
	desc->bd_md_count = total_md;
	md.user_ptr = &desc->bd_cbid;
	md.eq_handle = ptlrpc_eq_h;
	md.threshold = 1;			/* PUT or GET */

	for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
		md.options = PTLRPC_MD_OPTIONS |
			     ((desc->bd_type == BULK_GET_SOURCE) ?
			      LNET_MD_OP_GET : LNET_MD_OP_PUT);
		ptlrpc_fill_bulk_md(&md, desc, posted_md);

		rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
				  LNET_UNLINK, LNET_INS_AFTER, &me_h);
		if (rc != 0) {
			CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
			       desc->bd_import->imp_obd->obd_name, xid,
			       posted_md, rc);
			break;
		}

		/* About to let the network at it... */
		rc = LNetMDAttach(me_h, md, LNET_UNLINK,
				  &desc->bd_mds[posted_md]);
		if (rc != 0) {
			CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
			       desc->bd_import->imp_obd->obd_name, xid,
			       posted_md, rc);
			rc2 = LNetMEUnlink(me_h);
			LASSERT(rc2 == 0);
			break;
		}
	}

	if (rc != 0) {
		/* Partial posting: drop the count for the MDs never posted
		 * and unlink the ones that were. */
		LASSERT(rc == -ENOMEM);
		spin_lock(&desc->bd_lock);
		desc->bd_md_count -= total_md - posted_md;
		spin_unlock(&desc->bd_lock);
		LASSERT(desc->bd_md_count >= 0);
		mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
		req->rq_status = -ENOMEM;
		return -ENOMEM;
	}

	/* Set rq_xid to matchbits of the final bulk so that server can
	 * infer the number of bulks that were prepared */
	req->rq_xid = --xid;
	LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
		 "bd_last_xid = x%llu, rq_xid = x%llu\n",
		 desc->bd_last_xid, req->rq_xid);

	spin_lock(&desc->bd_lock);
	/* Holler if peer manages to touch buffers before he knows the xid */
	if (desc->bd_md_count != total_md)
		CWARN("%s: Peer %s touched %d buffers while I registered\n",
		      desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
		      total_md - desc->bd_md_count);
	spin_unlock(&desc->bd_lock);

	CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n",
	       desc->bd_md_count,
	       desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
	       desc->bd_iov_count, desc->bd_nob,
	       desc->bd_last_xid, req->rq_xid, desc->bd_portal);

	return 0;
}
EXPORT_SYMBOL(ptlrpc_register_bulk);
/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * If \a async, initiate the unlink and return without waiting.
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
	struct ptlrpc_bulk_desc *desc = req->rq_bulk;
	wait_queue_head_t *wq;
	struct l_wait_info lwi;
	int rc;

	LASSERT(!in_interrupt());	/* might sleep */

	/* Let's setup deadline for reply unlink. */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
	    async && req->rq_bulk_deadline == 0)
		req->rq_bulk_deadline = get_seconds() + LONG_UNLINK;

	if (ptlrpc_client_bulk_active(req) == 0)	/* completed or */
		return 1;				/* never registered */

	LASSERT(desc->bd_req == req);	/* bd_req NULL until registered */

	/* the unlink ensures the callback happens ASAP and is the last
	 * one. If it fails, it must be because completion just happened,
	 * but we must still l_wait_event() in this case to give liblustre
	 * a chance to run client_bulk_callback() */
	mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

	if (ptlrpc_client_bulk_active(req) == 0)	/* completed or */
		return 1;				/* never registered */

	/* Move to "Unregistering" phase as bulk was not unlinked yet. */
	ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);

	/* Do not wait for unlink to finish. */
	if (async)
		return 0;

	/* Wait on the request set's queue when part of a set, else on the
	 * request's own reply queue. */
	if (req->rq_set != NULL)
		wq = &req->rq_set->set_waitq;
	else
		wq = &req->rq_reply_waitq;

	for (;;) {
		/* Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
		if (rc == 0) {
			ptlrpc_rqphase_move(req, req->rq_next_phase);
			return 1;
		}

		LASSERT(rc == -ETIMEDOUT);
		DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
			  desc);
	}
	return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_bulk);
/*
 * ptlrpc_at_set_reply - account service time and stamp the reply.
 *
 * Feeds this request's measured service time into the per-service-part
 * adaptive-timeout estimate (skipping early replies, errors and recovery
 * requests), then records both the actual service time and the current
 * estimate in the reply message for the client.
 */
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
	struct ptlrpc_service *svc = svcpt->scp_service;
	/* wall-clock time since arrival, at least 1 second */
	int service_time = max_t(int, get_seconds() -
				 req->rq_arrival_time.tv_sec, 1);

	if (!(flags & PTLRPC_REPLY_EARLY) &&
	    (req->rq_type != PTL_RPC_MSG_ERR) &&
	    (req->rq_reqmsg != NULL) &&
	    !(lustre_msg_get_flags(req->rq_reqmsg) &
	      (MSG_RESENT | MSG_REPLAY |
	       MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
		/* early replies, errors and recovery requests don't count
		 * toward our service time estimate */
		int oldse = at_measured(&svcpt->scp_at_estimate, service_time);

		if (oldse != 0) {
			DEBUG_REQ(D_ADAPTTO, req,
				  "svc %s changed estimate from %d to %d",
				  svc->srv_name, oldse,
				  at_get(&svcpt->scp_at_estimate));
		}
	}
	/* Report actual service time for client latency calc */
	lustre_msg_set_service_time(req->rq_repmsg, service_time);
	/* Report service time estimate for future client reqs, but report 0
	 * (to be ignored by client) if it's a error reply during recovery.
	 * (bz15815) */
	if (req->rq_type == PTL_RPC_MSG_ERR &&
	    (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
		lustre_msg_set_timeout(req->rq_repmsg, 0);
	else
		lustre_msg_set_timeout(req->rq_repmsg,
				       at_get(&svcpt->scp_at_estimate));

	if (req->rq_reqmsg &&
	    !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
		CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%d:%x/%x len=%d\n",
		       flags, lustre_msg_get_flags(req->rq_reqmsg),
		       lustre_msg_is_v1(req->rq_reqmsg),
		       lustre_msg_get_magic(req->rq_reqmsg),
		       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
	}
}
/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types (early / maybe-difficult).
 * Returns 0 on success or error code
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_connection *conn;
	int rc;

	/* We must already have a reply buffer (only ptlrpc_error() may be
	 * called without one). The reply generated by sptlrpc layer (e.g.
	 * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
	 * have a request buffer which is either the actual (swabbed) incoming
	 * request, or a saved copy if this is a req saved in
	 * target_queue_final_reply().
	 */
	LASSERT(req->rq_no_reply == 0);
	LASSERT(req->rq_reqbuf != NULL);
	LASSERT(rs != NULL);
	LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
	LASSERT(req->rq_repmsg != NULL);
	LASSERT(req->rq_repmsg == rs->rs_msg);
	LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
	LASSERT(rs->rs_cb_id.cbid_arg == rs);

	/* There may be no rq_export during failover */

	if (unlikely(req->rq_export && req->rq_export->exp_obd &&
		     req->rq_export->exp_obd->obd_fail)) {
		/* Failed obd's only send ENODEV */
		req->rq_type = PTL_RPC_MSG_ERR;
		req->rq_status = -ENODEV;
		CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
		       req->rq_export->exp_obd->obd_minor);
	}

	/* In order to keep interoperability with the client (< 2.3) which
	 * doesn't have pb_jobid in ptlrpc_body, We have to shrink the
	 * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
	 * reply buffer on client will be overflow.
	 *
	 * XXX Remove this whenever we drop the interoperability with
	 * such client.
	 */
	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
					   sizeof(struct ptlrpc_body_v2), 1);

	if (req->rq_type != PTL_RPC_MSG_ERR)
		req->rq_type = PTL_RPC_MSG_REPLY;

	/* Fill in the reply message header fields from the request state. */
	lustre_msg_set_type(req->rq_repmsg, req->rq_type);
	lustre_msg_set_status(req->rq_repmsg,
			      ptlrpc_status_hton(req->rq_status));
	lustre_msg_set_opc(req->rq_repmsg,
		req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

	target_pack_pool_reply(req);

	ptlrpc_at_set_reply(req, flags);

	/* Pick the connection to reply on; fall back to the peer address
	 * when the export has no connection (e.g. during failover). */
	if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
		conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
	else
		conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

	if (unlikely(conn == NULL)) {
		CERROR("not replying on NULL connection\n"); /* bug 9635 */
		return -ENOTCONN;
	}
	ptlrpc_rs_addref(rs);			/* +1 ref for the network */

	rc = sptlrpc_svc_wrap_reply(req);
	if (unlikely(rc))
		goto out;

	req->rq_sent = get_seconds();

	rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
			  (rs->rs_difficult && !rs->rs_no_ack) ?
			  LNET_ACK_REQ : LNET_NOACK_REQ,
			  &rs->rs_cb_id, conn,
			  ptlrpc_req2svc(req)->srv_rep_portal,
			  req->rq_xid, req->rq_reply_off);
out:
	if (unlikely(rc != 0))
		ptlrpc_req_drop_rs(req);	/* drop the network ref on error */
	ptlrpc_connection_put(conn);
	return rc;
}
EXPORT_SYMBOL(ptlrpc_send_reply);
/* Send a normal (flag-less) reply for \a req; no-op for no-reply requests. */
int ptlrpc_reply(struct ptlrpc_request *req)
{
	return req->rq_no_reply ? 0 : ptlrpc_send_reply(req, 0);
}
EXPORT_SYMBOL(ptlrpc_reply);
/**
 * For request \a req send an error reply back. Create empty
 * reply buffers if necessary.
 */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
	int rc;

	if (req->rq_no_reply)
		return 0;

	if (req->rq_repmsg == NULL) {
		rc = lustre_pack_reply(req, 1, NULL, NULL);
		if (rc)
			return rc;
	}

	switch (req->rq_status) {
	case -ENOSPC:
	case -EACCES:
	case -EPERM:
	case -ENOENT:
	case -EINPROGRESS:
	case -EDQUOT:
		/* these statuses travel in a normal reply, not an ERR one */
		break;
	default:
		req->rq_type = PTL_RPC_MSG_ERR;
	}

	rc = ptlrpc_send_reply(req, may_be_difficult);
	return rc;
}
EXPORT_SYMBOL(ptlrpc_send_error);
/* Send an error reply for \a req in the simple (non-difficult) case;
 * thin wrapper around ptlrpc_send_error(). */
int ptlrpc_error(struct ptlrpc_request *req)
{
	return ptlrpc_send_error(req, 0);
}
EXPORT_SYMBOL(ptlrpc_error);
/**
 * Send request \a request.
 * if \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 *
 * Flow: wrap the request for security, register any bulk descriptor,
 * (optionally) attach an ME/MD pair to receive the reply, then PUT the
 * request buffer.  On failure the reply ME and bulk registration are
 * torn down again via the cleanup_* labels.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
	int rc;
	int rc2;
	int mpflag = 0;
	struct ptlrpc_connection *connection;
	lnet_handle_me_t reply_me_h;
	lnet_md_t reply_md;
	struct obd_device *obd = request->rq_import->imp_obd;

	/* fault-injection point: silently drop the RPC */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
		return 0;

	LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
	LASSERT(request->rq_wait_ctx == 0);

	/* If this is a re-transmit, we're required to have disengaged
	 * cleanly from the previous attempt */
	LASSERT(!request->rq_receiving_reply);
	LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
		  (request->rq_import->imp_state == LUSTRE_IMP_FULL)));

	if (unlikely(obd != NULL && obd->obd_fail)) {
		CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
		       obd->obd_name);
		/* this prevents us from waiting in ptlrpc_queue_wait */
		spin_lock(&request->rq_lock);
		request->rq_err = 1;
		spin_unlock(&request->rq_lock);
		request->rq_status = -ENODEV;
		return -ENODEV;
	}

	connection = request->rq_import->imp_connection;

	/* Stamp the outgoing message with the import's identity. */
	lustre_msg_set_handle(request->rq_reqmsg,
			      &request->rq_import->imp_remote_handle);
	lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
	lustre_msg_set_conn_cnt(request->rq_reqmsg,
				request->rq_import->imp_conn_cnt);
	lustre_msghdr_set_flags(request->rq_reqmsg,
				request->rq_import->imp_msghdr_flags);

	if (request->rq_resend)
		lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);

	if (request->rq_memalloc)
		mpflag = cfs_memory_pressure_get_and_set();

	rc = sptlrpc_cli_wrap_request(request);
	if (rc)
		goto out;

	/* bulk register should be done after wrap_request() */
	if (request->rq_bulk != NULL) {
		rc = ptlrpc_register_bulk(request);
		if (rc != 0)
			goto out;
	}

	if (!noreply) {
		LASSERT(request->rq_replen != 0);
		if (request->rq_repbuf == NULL) {
			LASSERT(request->rq_repdata == NULL);
			LASSERT(request->rq_repmsg == NULL);
			rc = sptlrpc_cli_alloc_repbuf(request,
						      request->rq_replen);
			if (rc) {
				/* this prevents us from looping in
				 * ptlrpc_queue_wait */
				spin_lock(&request->rq_lock);
				request->rq_err = 1;
				spin_unlock(&request->rq_lock);
				request->rq_status = rc;
				goto cleanup_bulk;
			}
		} else {
			request->rq_repdata = NULL;
			request->rq_repmsg = NULL;
		}

		/* match entry the reply PUT will land in */
		rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
				  connection->c_peer, request->rq_xid, 0,
				  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
		if (rc != 0) {
			CERROR("LNetMEAttach failed: %d\n", rc);
			LASSERT(rc == -ENOMEM);
			rc = -ENOMEM;
			goto cleanup_bulk;
		}
	}

	spin_lock(&request->rq_lock);
	/* If the MD attach succeeds, there _will_ be a reply_in callback */
	request->rq_receiving_reply = !noreply;
	request->rq_req_unlink = 1;
	/* We are responsible for unlinking the reply buffer */
	request->rq_reply_unlink = !noreply;
	/* Clear any flags that may be present from previous sends. */
	request->rq_replied = 0;
	request->rq_err = 0;
	request->rq_timedout = 0;
	request->rq_net_err = 0;
	request->rq_resend = 0;
	request->rq_restart = 0;
	request->rq_reply_truncate = 0;
	spin_unlock(&request->rq_lock);

	if (!noreply) {
		reply_md.start = request->rq_repbuf;
		reply_md.length = request->rq_repbuf_len;
		/* Allow multiple early replies */
		reply_md.threshold = LNET_MD_THRESH_INF;
		/* Manage remote for early replies */
		reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
			LNET_MD_MANAGE_REMOTE |
			LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */;
		reply_md.user_ptr = &request->rq_reply_cbid;
		reply_md.eq_handle = ptlrpc_eq_h;

		/* We must see the unlink callback to unset rq_reply_unlink,
		   so we can't auto-unlink */
		rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
				  &request->rq_reply_md_h);
		if (rc != 0) {
			CERROR("LNetMDAttach failed: %d\n", rc);
			LASSERT(rc == -ENOMEM);
			spin_lock(&request->rq_lock);
			/* ...but the MD attach didn't succeed... */
			request->rq_receiving_reply = 0;
			spin_unlock(&request->rq_lock);
			rc = -ENOMEM;
			goto cleanup_me;
		}

		CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
		       request->rq_repbuf_len, request->rq_xid,
		       request->rq_reply_portal);
	}

	/* add references on request for request_out_callback */
	ptlrpc_request_addref(request);
	if (obd != NULL && obd->obd_svc_stats != NULL)
		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
				    atomic_read(&request->rq_import->imp_inflight));

	OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

	do_gettimeofday(&request->rq_arrival_time);
	request->rq_sent = get_seconds();
	/* We give the server rq_timeout secs to process the req, and
	   add the network latency for our local timeout. */
	request->rq_deadline = request->rq_sent + request->rq_timeout +
		ptlrpc_at_get_net_latency(request);

	ptlrpc_pinger_sending_on_import(request->rq_import);

	DEBUG_REQ(D_INFO, request, "send flg=%x",
		  lustre_msg_get_flags(request->rq_reqmsg));
	rc = ptl_send_buf(&request->rq_req_md_h,
			  request->rq_reqbuf, request->rq_reqdata_len,
			  LNET_NOACK_REQ, &request->rq_req_cbid,
			  connection,
			  request->rq_request_portal,
			  request->rq_xid, 0);
	if (rc == 0)
		goto out;

	/* the send failed: drop the callback's reference and unwind */
	ptlrpc_req_finished(request);
	if (noreply)
		goto out;

cleanup_me:
	/* MEUnlink is safe; the PUT didn't even get off the ground, and
	 * nobody apart from the PUT's target has the right nid+XID to
	 * access the reply buffer. */
	rc2 = LNetMEUnlink(reply_me_h);
	LASSERT(rc2 == 0);
	/* UNLINKED callback called synchronously */
	LASSERT(!request->rq_receiving_reply);

cleanup_bulk:
	/* We do sync unlink here as there was no real transfer here so
	 * the chance to have long unlink to sluggish net is smaller here. */
	ptlrpc_unregister_bulk(request, 0);
out:
	if (request->rq_memalloc)
		cfs_memory_pressure_restore(mpflag);
	return rc;
}
EXPORT_SYMBOL(ptl_send_rpc);
/**
 * Register request buffer descriptor for request receiving.
 *
 * Attaches a wildcard match entry (any NID, any PID, any match bits)
 * on the service's request portal and hangs the rqbd's buffer off it
 * as an auto-unlinking MD, so incoming request PUTs land in it.
 * Returns 0 on success, -ENOMEM on any LNet attach failure.
 */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
	struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
	static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
	int rc;
	lnet_md_t md;
	lnet_handle_me_t me_h;

	CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
	       service->srv_req_portal);

	/* fault-injection point for buffer-posting failures */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
		return -ENOMEM;

	/* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
	 * which means buffer can only be attached on local CPT, and LND
	 * threads can find it by grabbing a local lock */
	rc = LNetMEAttach(service->srv_req_portal,
			  match_id, 0, ~0, LNET_UNLINK,
			  rqbd->rqbd_svcpt->scp_cpt >= 0 ?
			  LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
	if (rc != 0) {
		CERROR("LNetMEAttach failed: %d\n", rc);
		return -ENOMEM;
	}

	LASSERT(rqbd->rqbd_refcount == 0);
	rqbd->rqbd_refcount = 1;	/* ref held by the attached MD */

	md.start = rqbd->rqbd_buffer;
	md.length = service->srv_buf_size;
	md.max_size = service->srv_max_req_size;
	md.threshold = LNET_MD_THRESH_INF;	/* buffer takes many PUTs */
	md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
	md.user_ptr = &rqbd->rqbd_cbid;
	md.eq_handle = ptlrpc_eq_h;

	rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
	if (rc == 0)
		return 0;

	/* MD attach failed: drop the ME and the ref we just took */
	CERROR("LNetMDAttach failed: %d;\n", rc);
	LASSERT(rc == -ENOMEM);
	rc = LNetMEUnlink(me_h);
	LASSERT(rc == 0);
	rqbd->rqbd_refcount = 0;

	return -ENOMEM;
}
| gpl-2.0 |
markbrown314/edison-linux | drivers/staging/comedi/drivers/cb_pcidda.c | 228 | 12611 | /*
* comedi/drivers/cb_pcidda.c
* Driver for the ComputerBoards / MeasurementComputing PCI-DDA series.
*
* Copyright (C) 2001 Ivan Martinez <ivanmr@altavista.com>
* Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Driver: cb_pcidda
* Description: MeasurementComputing PCI-DDA series
* Devices: [Measurement Computing] PCI-DDA08/12 (pci-dda08/12),
* PCI-DDA04/12 (pci-dda04/12), PCI-DDA02/12 (pci-dda02/12),
* PCI-DDA08/16 (pci-dda08/16), PCI-DDA04/16 (pci-dda04/16),
* PCI-DDA02/16 (pci-dda02/16)
* Author: Ivan Martinez <ivanmr@altavista.com>
* Frank Mori Hess <fmhess@users.sourceforge.net>
* Status: works
*
* Configuration options: not applicable, uses PCI auto config
*
* Only simple analog output writing is supported.
*/
#include <linux/module.h>
#include "../comedi_pci.h"
#include "8255.h"
#define EEPROM_SIZE 128 /* number of entries in eeprom */
/* maximum number of ao channels for supported boards */
#define MAX_AO_CHANNELS 8
/* Digital I/O registers */
#define CB_DDA_DIO0_8255_BASE 0x00
#define CB_DDA_DIO1_8255_BASE 0x04
/* DAC registers */
#define CB_DDA_DA_CTRL_REG 0x00 /* D/A Control Register */
#define CB_DDA_DA_CTRL_SU (1 << 0) /* Simultaneous update */
#define CB_DDA_DA_CTRL_EN (1 << 1) /* Enable specified DAC */
#define CB_DDA_DA_CTRL_DAC(x) ((x) << 2) /* Specify DAC channel */
#define CB_DDA_DA_CTRL_RANGE2V5 (0 << 6) /* 2.5V range */
#define CB_DDA_DA_CTRL_RANGE5V (2 << 6) /* 5V range */
#define CB_DDA_DA_CTRL_RANGE10V (3 << 6) /* 10V range */
#define CB_DDA_DA_CTRL_UNIP (1 << 8) /* Unipolar range */
#define DACALIBRATION1 4 /* D/A CALIBRATION REGISTER 1 */
/* write bits */
/* serial data input for eeprom, caldacs, reference dac */
#define SERIAL_IN_BIT 0x1
#define CAL_CHANNEL_MASK (0x7 << 1)
#define CAL_CHANNEL_BITS(channel) (((channel) << 1) & CAL_CHANNEL_MASK)
/* read bits */
#define CAL_COUNTER_MASK 0x1f
/* calibration counter overflow status bit */
#define CAL_COUNTER_OVERFLOW_BIT 0x20
/* analog output is less than reference dac voltage */
#define AO_BELOW_REF_BIT 0x40
#define SERIAL_OUT_BIT 0x80 /* serial data out, for reading from eeprom */
#define DACALIBRATION2 6 /* D/A CALIBRATION REGISTER 2 */
#define SELECT_EEPROM_BIT 0x1 /* send serial data in to eeprom */
/* don't send serial data to MAX542 reference dac */
#define DESELECT_REF_DAC_BIT 0x2
/* don't send serial data to caldac n */
#define DESELECT_CALDAC_BIT(n) (0x4 << (n))
/* manual says to set this bit with no explanation */
#define DUMMY_BIT 0x40
#define CB_DDA_DA_DATA_REG(x) (0x08 + ((x) * 2))
/* Offsets for the caldac channels */
#define CB_DDA_CALDAC_FINE_GAIN 0
#define CB_DDA_CALDAC_COURSE_GAIN 1
#define CB_DDA_CALDAC_COURSE_OFFSET 2
#define CB_DDA_CALDAC_FINE_OFFSET 3
/* The six analog output ranges: bipolar and unipolar variants of
 * 10V, 5V and 2.5V full scale.  Index order matters: the range index
 * is mapped to hardware control bits in cb_pcidda_ao_insn_write(). */
static const struct comedi_lrange cb_pcidda_ranges = {
	6, {
		BIP_RANGE(10),
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5)
	}
};
/* Indexes into cb_pcidda_boards[]; also used as PCI table driver_data. */
enum cb_pcidda_boardid {
	BOARD_DDA02_12,
	BOARD_DDA04_12,
	BOARD_DDA08_12,
	BOARD_DDA02_16,
	BOARD_DDA04_16,
	BOARD_DDA08_16,
};

/* Static description of one supported board variant. */
struct cb_pcidda_board {
	const char *name;	/* comedi board name */
	int ao_chans;		/* number of analog output channels */
	int ao_bits;		/* DAC resolution in bits */
};
/* Per-variant board data, indexed by enum cb_pcidda_boardid. */
static const struct cb_pcidda_board cb_pcidda_boards[] = {
	[BOARD_DDA02_12] = {
		.name = "pci-dda02/12",
		.ao_chans = 2,
		.ao_bits = 12,
	},
	[BOARD_DDA04_12] = {
		.name = "pci-dda04/12",
		.ao_chans = 4,
		.ao_bits = 12,
	},
	[BOARD_DDA08_12] = {
		.name = "pci-dda08/12",
		.ao_chans = 8,
		.ao_bits = 12,
	},
	[BOARD_DDA02_16] = {
		.name = "pci-dda02/16",
		.ao_chans = 2,
		.ao_bits = 16,
	},
	[BOARD_DDA04_16] = {
		.name = "pci-dda04/16",
		.ao_chans = 4,
		.ao_bits = 16,
	},
	[BOARD_DDA08_16] = {
		.name = "pci-dda08/16",
		.ao_chans = 8,
		.ao_bits = 16,
	},
};
/* Per-device driver state, allocated in cb_pcidda_auto_attach(). */
struct cb_pcidda_private {
	unsigned long daqio;	/* I/O base (PCI BAR 3) for DAC registers */
	/* bits last written to da calibration register 1 */
	unsigned int dac_cal1_bits;
	/* current range settings for output channels */
	unsigned int ao_range[MAX_AO_CHANNELS];
	u16 eeprom_data[EEPROM_SIZE];	/* software copy of board's eeprom */
};
/* Low-level serial read from the eeprom: clock in a 16-bit value,
 * most significant bit first, one status-register read per bit. */
static unsigned int cb_pcidda_serial_in(struct comedi_device *dev)
{
	struct cb_pcidda_private *devpriv = dev->private;
	const int nbits = 16;	/* values are 16 bits wide */
	unsigned int result = 0;
	int bit;

	for (bit = nbits - 1; bit >= 0; bit--) {
		if (inw_p(devpriv->daqio + DACALIBRATION1) & SERIAL_OUT_BIT)
			result |= 1 << bit;
	}

	return result;
}
/* Low-level serial write to the eeprom/caldacs: shift out the low
 * num_bits of value, most significant bit first, one register write
 * per bit (the write itself acts as the clock). */
static void cb_pcidda_serial_out(struct comedi_device *dev, unsigned int value,
				 unsigned int num_bits)
{
	struct cb_pcidda_private *devpriv = dev->private;
	int bit;

	for (bit = num_bits - 1; bit >= 0; bit--) {
		if (value & (1 << bit))
			devpriv->dac_cal1_bits |= SERIAL_IN_BIT;
		else
			devpriv->dac_cal1_bits &= ~SERIAL_IN_BIT;
		outw_p(devpriv->dac_cal1_bits, devpriv->daqio + DACALIBRATION1);
	}
}
/* reads a 16 bit value from board's eeprom
 *
 * Sequence: select the eeprom (while deselecting the reference dac and
 * all caldacs), clock out a read instruction and the address, clock the
 * 16-bit result back in, then deselect the eeprom again. */
static unsigned int cb_pcidda_read_eeprom(struct comedi_device *dev,
					  unsigned int address)
{
	struct cb_pcidda_private *devpriv = dev->private;
	unsigned int i;
	unsigned int cal2_bits;
	unsigned int value;
	/* one caldac for every two dac channels */
	const int max_num_caldacs = 4;
	/* bits to send to tell eeprom we want to read */
	const int read_instruction = 0x6;
	const int instruction_length = 3;
	const int address_length = 8;

	/* send serial output stream to eeprom */
	cal2_bits = SELECT_EEPROM_BIT | DESELECT_REF_DAC_BIT | DUMMY_BIT;
	/* deactivate caldacs (one caldac for every two channels) */
	for (i = 0; i < max_num_caldacs; i++)
		cal2_bits |= DESELECT_CALDAC_BIT(i);
	outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);

	/* tell eeprom we want to read */
	cb_pcidda_serial_out(dev, read_instruction, instruction_length);
	/* send address we want to read from */
	cb_pcidda_serial_out(dev, address, address_length);
	/* clock the 16-bit value back in */
	value = cb_pcidda_serial_in(dev);

	/* deactivate eeprom */
	cal2_bits &= ~SELECT_EEPROM_BIT;
	outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);

	return value;
}
/* writes to 8 bit calibration dacs
 *
 * Shifts out the 3-bit caldac channel and the 8-bit value serially,
 * then pulses the target caldac's select line low-high to latch the
 * shifted stream into it. */
static void cb_pcidda_write_caldac(struct comedi_device *dev,
				   unsigned int caldac, unsigned int channel,
				   unsigned int value)
{
	struct cb_pcidda_private *devpriv = dev->private;
	unsigned int cal2_bits;
	unsigned int i;
	/* caldacs use 3 bit channel specification */
	const int num_channel_bits = 3;
	const int num_caldac_bits = 8;	/* 8 bit calibration dacs */
	/* one caldac for every two dac channels */
	const int max_num_caldacs = 4;

	/* write 3 bit channel */
	cb_pcidda_serial_out(dev, channel, num_channel_bits);
	/* write 8 bit caldac value */
	cb_pcidda_serial_out(dev, value, num_caldac_bits);

	/*
	 * latch stream into appropriate caldac deselect reference dac
	 */
	cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
	/* deactivate caldacs (one caldac for every two channels) */
	for (i = 0; i < max_num_caldacs; i++)
		cal2_bits |= DESELECT_CALDAC_BIT(i);
	/* activate the caldac we want */
	cal2_bits &= ~DESELECT_CALDAC_BIT(caldac);
	outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
	/* deactivate caldac */
	cal2_bits |= DESELECT_CALDAC_BIT(caldac);
	outw_p(cal2_bits, devpriv->daqio + DACALIBRATION2);
}
/* set caldacs to eeprom values for given channel and range */
static void cb_pcidda_calibrate(struct comedi_device *dev, unsigned int channel,
				unsigned int range)
{
	struct cb_pcidda_private *devpriv = dev->private;
	unsigned int caldac = channel / 2;	/* one caldac per channel pair */
	unsigned int chan = 4 * (channel % 2);	/* caldac channel base */
	/* assumes eeprom layout of 2 words (offset, gain) per range and
	 * 6 ranges (12 words) per channel, starting at word 0x7 —
	 * TODO confirm against board documentation */
	unsigned int index = 2 * range + 12 * channel;
	unsigned int offset;
	unsigned int gain;

	/* save range so we can tell when we need to readjust calibration */
	devpriv->ao_range[channel] = range;

	/* get values from eeprom data */
	offset = devpriv->eeprom_data[0x7 + index];
	gain = devpriv->eeprom_data[0x8 + index];

	/* set caldacs: high byte is the coarse setting, low byte the fine */
	cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_COURSE_OFFSET,
			       (offset >> 8) & 0xff);
	cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_FINE_OFFSET,
			       offset & 0xff);
	cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_COURSE_GAIN,
			       (gain >> 8) & 0xff);
	cb_pcidda_write_caldac(dev, caldac, chan + CB_DDA_CALDAC_FINE_GAIN,
			       gain & 0xff);
}
/* Analog output instruction handler.
 *
 * Re-runs the caldac calibration when the requested range differs from
 * the last one used on this channel, programs the control register for
 * the channel/range, then writes the sample data.
 *
 * Returns insn->n (the number of samples consumed).
 */
static int cb_pcidda_ao_insn_write(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	struct cb_pcidda_private *devpriv = dev->private;
	unsigned int channel = CR_CHAN(insn->chanspec);
	unsigned int range = CR_RANGE(insn->chanspec);
	unsigned int ctrl;
	unsigned int i;

	if (range != devpriv->ao_range[channel])
		cb_pcidda_calibrate(dev, channel, range);

	ctrl = CB_DDA_DA_CTRL_EN | CB_DDA_DA_CTRL_DAC(channel);

	/* ranges 0-2 are bipolar, 3-5 the unipolar equivalents */
	switch (range) {
	case 0:
	case 3:
		ctrl |= CB_DDA_DA_CTRL_RANGE10V;
		break;
	case 1:
	case 4:
		ctrl |= CB_DDA_DA_CTRL_RANGE5V;
		break;
	case 2:
	case 5:
		ctrl |= CB_DDA_DA_CTRL_RANGE2V5;
		break;
	}
	if (range > 2)
		ctrl |= CB_DDA_DA_CTRL_UNIP;

	outw(ctrl, devpriv->daqio + CB_DDA_DA_CTRL_REG);

	/* BUGFIX: the comedi core may pass insn->n > 1 samples; write all
	 * of them instead of only data[0], since we claim to have consumed
	 * insn->n below. */
	for (i = 0; i < insn->n; i++)
		outw(data[i], devpriv->daqio + CB_DDA_DA_DATA_REG(channel));

	return insn->n;
}
/* Attach handler for PCI auto-configuration.
 *
 * \a context is the cb_pcidda_boardid taken from the PCI id table's
 * driver_data.  Sets up one AO subdevice and two 8255 DIO subdevices,
 * then snapshots the caldac eeprom and applies initial calibration. */
static int cb_pcidda_auto_attach(struct comedi_device *dev,
				 unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	const struct cb_pcidda_board *thisboard = NULL;
	struct cb_pcidda_private *devpriv;
	struct comedi_subdevice *s;
	int i;
	int ret;

	/* map driver_data to the static board description */
	if (context < ARRAY_SIZE(cb_pcidda_boards))
		thisboard = &cb_pcidda_boards[context];
	if (!thisboard)
		return -ENODEV;
	dev->board_ptr = thisboard;
	dev->board_name = thisboard->name;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;
	/* BAR 2 holds the 8255 digital I/O, BAR 3 the DAC registers */
	dev->iobase = pci_resource_start(pcidev, 2);
	devpriv->daqio = pci_resource_start(pcidev, 3);

	ret = comedi_alloc_subdevices(dev, 3);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	/* analog output subdevice */
	s->type = COMEDI_SUBD_AO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = thisboard->ao_chans;
	s->maxdata = (1 << thisboard->ao_bits) - 1;
	s->range_table = &cb_pcidda_ranges;
	s->insn_write = cb_pcidda_ao_insn_write;

	/* two 8255 digital io subdevices */
	for (i = 0; i < 2; i++) {
		s = &dev->subdevices[1 + i];
		ret = subdev_8255_init(dev, s, NULL, i * I8255_SIZE);
		if (ret)
			return ret;
	}

	/* Read the caldac eeprom data */
	for (i = 0; i < EEPROM_SIZE; i++)
		devpriv->eeprom_data[i] = cb_pcidda_read_eeprom(dev, i);

	/* set calibrations dacs */
	for (i = 0; i < thisboard->ao_chans; i++)
		cb_pcidda_calibrate(dev, i, devpriv->ao_range[i]);

	return 0;
}
/* comedi driver registration glue */
static struct comedi_driver cb_pcidda_driver = {
	.driver_name = "cb_pcidda",
	.module = THIS_MODULE,
	.auto_attach = cb_pcidda_auto_attach,
	.detach = comedi_pci_detach,
};

/* PCI probe: hand the device to the comedi core; id->driver_data is
 * the board index forwarded to cb_pcidda_auto_attach() as context. */
static int cb_pcidda_pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &cb_pcidda_driver,
				      id->driver_data);
}
/* Supported PCI devices; driver_data selects the board description. */
static const struct pci_device_id cb_pcidda_pci_table[] = {
	{ PCI_VDEVICE(CB, 0x0020), BOARD_DDA02_12 },
	{ PCI_VDEVICE(CB, 0x0021), BOARD_DDA04_12 },
	{ PCI_VDEVICE(CB, 0x0022), BOARD_DDA08_12 },
	{ PCI_VDEVICE(CB, 0x0023), BOARD_DDA02_16 },
	{ PCI_VDEVICE(CB, 0x0024), BOARD_DDA04_16 },
	{ PCI_VDEVICE(CB, 0x0025), BOARD_DDA08_16 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, cb_pcidda_pci_table);

static struct pci_driver cb_pcidda_pci_driver = {
	.name = "cb_pcidda",
	.id_table = cb_pcidda_pci_table,
	.probe = cb_pcidda_pci_probe,
	.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(cb_pcidda_driver, cb_pcidda_pci_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Wren6991/linux | drivers/staging/unisys/visorutil/periodic_work.c | 228 | 6093 | /* periodic_work.c
*
* Copyright (C) 2010 - 2013 UNISYS CORPORATION
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*/
/*
* Helper functions to schedule periodic work in Linux kernel mode.
*/
#include "timskmod.h"
#include "periodic_work.h"
#define MYDRVNAME "periodic_work"
/* State for one periodic-work client; created by
 * visor_periodic_work_create(), freed by visor_periodic_work_destroy(). */
struct periodic_work {
	rwlock_t lock;			/* guards is_scheduled/want_to_stop */
	struct delayed_work work;	/* underlying kernel delayed work */
	void (*workfunc)(void *);	/* client callback run each period */
	void *workfuncarg;		/* opaque argument for workfunc */
	BOOL is_scheduled;		/* TRUE while the work is queued */
	BOOL want_to_stop;		/* TRUE while a stop is in progress */
	ulong jiffy_interval;		/* delay between callback runs */
	struct workqueue_struct *workqueue;	/* queue the work runs on */
	const char *devnam;	/* creator-supplied name (not read here) */
};
/* Workqueue trampoline: recover the periodic_work that embeds this
 * delayed_work item and invoke the client's callback. */
static void periodic_work_func(struct work_struct *work)
{
	struct periodic_work *pwork =
		container_of(work, struct periodic_work, work.work);

	pwork->workfunc(pwork->workfuncarg);
}
/* Allocate and initialize a periodic_work descriptor.
 *
 * The work is not scheduled yet; call visor_periodic_work_start() for
 * that.  Returns NULL on allocation failure.
 */
struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
					struct workqueue_struct *workqueue,
					void (*workfunc)(void *),
					void *workfuncarg,
					const char *devnam)
{
	struct periodic_work *pw = kzalloc(sizeof(*pw),
					   GFP_KERNEL | __GFP_NORETRY);

	if (pw == NULL)
		return NULL;

	rwlock_init(&pw->lock);
	pw->devnam = devnam;
	pw->workqueue = workqueue;
	pw->jiffy_interval = jiffy_interval;
	pw->workfuncarg = workfuncarg;
	pw->workfunc = workfunc;
	return pw;
}
EXPORT_SYMBOL_GPL(visor_periodic_work_create);
/* Release a periodic_work descriptor.  This only frees memory; no
 * cancellation is attempted here, so the caller is expected to have
 * stopped the work (visor_periodic_work_stop()) first. */
void visor_periodic_work_destroy(struct periodic_work *pw)
{
	kfree(pw);
}
EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
/** Call this from your periodic work worker function to schedule the next
 * call.
 * If this function returns FALSE, there was a failure and the
 * periodic work is no longer scheduled
 */
BOOL visor_periodic_work_nextperiod(struct periodic_work *pw)
{
	BOOL rc = FALSE;

	write_lock(&pw->lock);
	if (pw->want_to_stop) {
		pw->is_scheduled = FALSE;
		pw->want_to_stop = FALSE;
		rc = TRUE;	/* yes, TRUE; see visor_periodic_work_stop() */
		goto unlock;
	}
	/* BUGFIX: queue_delayed_work() returns a bool (FALSE when the work
	 * was already pending); the old "< 0" test was dead code because
	 * the return value is never negative, so re-queue failures were
	 * silently treated as success. */
	if (!queue_delayed_work(pw->workqueue, &pw->work,
				pw->jiffy_interval)) {
		pw->is_scheduled = FALSE;
		rc = FALSE;
		goto unlock;
	}
	rc = TRUE;
unlock:
	write_unlock(&pw->lock);
	return rc;
}
EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
/** This function returns TRUE iff new periodic work was actually started.
 * If this function returns FALSE, then no work was started
 * (either because it was already started, or because of a failure).
 */
BOOL visor_periodic_work_start(struct periodic_work *pw)
{
	BOOL rc = FALSE;

	write_lock(&pw->lock);
	if (pw->is_scheduled) {
		rc = FALSE;	/* already running; nothing to start */
		goto unlock;
	}
	if (pw->want_to_stop) {
		rc = FALSE;	/* a stop is in progress; refuse to start */
		goto unlock;
	}
	INIT_DELAYED_WORK(&pw->work, &periodic_work_func);
	/* BUGFIX: queue_delayed_work() returns a bool (FALSE when the work
	 * was already pending); the old "< 0" test was dead code because
	 * the return value is never negative, so queue failures were
	 * silently treated as success. */
	if (!queue_delayed_work(pw->workqueue, &pw->work,
				pw->jiffy_interval)) {
		rc = FALSE;
		goto unlock;
	}
	pw->is_scheduled = TRUE;
	rc = TRUE;
unlock:
	write_unlock(&pw->lock);
	return rc;
}
EXPORT_SYMBOL_GPL(visor_periodic_work_start);
/** This function returns TRUE iff your call actually stopped the periodic
 * work.
 *
 * -- PAY ATTENTION... this is important --
 *
 * NO NO #1
 *
 *      Do NOT call this function from some function that is running on the
 *      same workqueue as the work you are trying to stop might be running
 *      on!  If you violate this rule, visor_periodic_work_stop() MIGHT work,
 *      but it also MIGHT get hung up in an infinite loop saying
 *      "waiting for delayed work...".  This will happen if the delayed work
 *      you are trying to cancel has been put in the workqueue list, but can't
 *      run yet because we are running that same workqueue thread right now.
 *
 *      Bottom line: If you need to call visor_periodic_work_stop() from a
 *      workitem, be sure the workitem is on a DIFFERENT workqueue than the
 *      workitem that you are trying to cancel.
 *
 *      If I could figure out some way to check for this "no no" condition in
 *      the code, I would.  It would have saved me the trouble of writing this
 *      long comment.  And also, don't think this is some "theoretical" race
 *      condition.  It is REAL, as I have spent the day chasing it.
 *
 * NO NO #2
 *
 *      Take close note of the locks that you own when you call this function.
 *      You must NOT own any locks that are needed by the periodic work
 *      function that is currently installed.  If you DO, a deadlock may result,
 *      because stopping the periodic work often involves waiting for the last
 *      iteration of the periodic work function to complete.  Again, if you hit
 *      this deadlock, you will get hung up in an infinite loop saying
 *      "waiting for delayed work...".
 */
BOOL visor_periodic_work_stop(struct periodic_work *pw)
{
	BOOL stopped_something = FALSE;

	write_lock(&pw->lock);
	stopped_something = pw->is_scheduled && (!pw->want_to_stop);
	while (pw->is_scheduled) {
		pw->want_to_stop = TRUE;
		if (cancel_delayed_work(&pw->work)) {
			/* We get here if the delayed work was pending as
			 * delayed work, but was NOT run.
			 */
			WARN_ON(!pw->is_scheduled);
			pw->is_scheduled = FALSE;
		} else {
			/* If we get here, either the delayed work:
			 * - was run, OR,
			 * - is running RIGHT NOW on another processor, OR,
			 * - wasn't even scheduled (there is a miniscule
			 *   timing window where this could be the case)
			 * flush_workqueue() would make sure it is finished
			 * executing, but that still isn't very useful, which
			 * explains the loop...
			 */
		}
		if (pw->is_scheduled) {
			/* Drop the lock briefly so the in-flight work item
			 * can run (and clear is_scheduled) before we poll
			 * again. */
			write_unlock(&pw->lock);
			SLEEPJIFFIES(10);
			write_lock(&pw->lock);
		} else {
			pw->want_to_stop = FALSE;
		}
	}
	write_unlock(&pw->lock);
	return stopped_something;
}
EXPORT_SYMBOL_GPL(visor_periodic_work_stop);
| gpl-2.0 |
supersonicninja/HW01EKERNEL | drivers/media/video/msm/vb6801.c | 740 | 42186 | /* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <media/msm_camera.h>
#include <mach/gpio.h>
#include <mach/camera.h>
#include "vb6801.h"
/*=============================================================
SENSOR REGISTER DEFINES
==============================================================*/
enum {
REG_HOLD = 0x0104,
RELEASE_HOLD = 0x0000,
HOLD = 0x0001,
STANDBY_MODE = 0x0000,
REG_COARSE_INTEGRATION_TIME = 0x0202,
REG_ANALOGUE_GAIN_CODE_GLOBAL = 0x0204,
REG_RAMP_SCALE = 0x3116,
REG_POWER_MAN_ENABLE_3 = 0x3142,
REG_POWER_MAN_ENABLE_4 = 0x3143,
REG_POWER_MAN_ENABLE_5 = 0x3144,
REG_CCP2_DATA_FORMAT = 0x0112,
REG_PRE_PLL_CLK_DIV = 0x0304,
REG_PLL_MULTIPLIER = 0x0306,
REG_VT_SYS_CLK_DIV = 0x0302,
REG_VT_PIX_CLK_DIV = 0x0300,
REG_OP_SYS_CLK_DIV = 0x030A,
REG_OP_PIX_CLK_DIV = 0x0308,
REG_VT_LINE_LENGTH_PCK = 0x0342,
REG_X_OUTPUT_SIZE = 0x034C,
REG_Y_OUTPUT_SIZE = 0x034E,
REG_X_ODD_INC = 0x0382,
REG_Y_ODD_INC = 0x0386,
REG_VT_FRAME_LENGTH_LINES = 0x0340,
REG_ANALOG_TIMING_MODES_2 = 0x3113,
REG_BRUCE_ENABLE = 0x37B0,
REG_OP_CODER_SYNC_CLK_SETUP = 0x3400,
REG_OP_CODER_ENABLE = 0x3401,
REG_OP_CODER_SLOW_PAD_EN = 0x3402,
REG_OP_CODER_AUTO_STARTUP = 0x3414,
REG_SCYTHE_ENABLE = 0x3204,
REG_SCYTHE_WEIGHT = 0x3206,
REG_FRAME_COUNT = 0x0005,
REG_MODE_SELECT = 0x0100,
REG_CCP2_CHANNEL_IDENTIFIER = 0x0110,
REG_CCP2_SIGNALLING_MODE = 0x0111,
REG_BTL_LEVEL_SETUP = 0x311B,
REG_OP_CODER_AUTOMATIC_MODE_ENABLE = 0x3403,
REG_PLL_CTRL = 0x3801,
REG_VCM_DAC_CODE = 0x3860,
REG_VCM_DAC_STROBE = 0x3868,
REG_VCM_DAC_ENABLE = 0x386C,
REG_NVM_T1_ADDR_00 = 0x3600,
REG_NVM_T1_ADDR_01 = 0x3601,
REG_NVM_T1_ADDR_02 = 0x3602,
REG_NVM_T1_ADDR_03 = 0x3603,
REG_NVM_T1_ADDR_04 = 0x3604,
REG_NVM_T1_ADDR_05 = 0x3605,
REG_NVM_T1_ADDR_06 = 0x3606,
REG_NVM_T1_ADDR_07 = 0x3607,
REG_NVM_T1_ADDR_08 = 0x3608,
REG_NVM_T1_ADDR_09 = 0x3609,
REG_NVM_T1_ADDR_0A = 0x360A,
REG_NVM_T1_ADDR_0B = 0x360B,
REG_NVM_T1_ADDR_0C = 0x360C,
REG_NVM_T1_ADDR_0D = 0x360D,
REG_NVM_T1_ADDR_0E = 0x360E,
REG_NVM_T1_ADDR_0F = 0x360F,
REG_NVM_T1_ADDR_10 = 0x3610,
REG_NVM_T1_ADDR_11 = 0x3611,
REG_NVM_T1_ADDR_12 = 0x3612,
REG_NVM_T1_ADDR_13 = 0x3613,
REG_NVM_CTRL = 0x3680,
REG_NVM_PDN = 0x3681,
REG_NVM_PULSE_WIDTH = 0x368B,
};
#define VB6801_LINES_PER_FRAME_PREVIEW 800
#define VB6801_LINES_PER_FRAME_SNAPSHOT 1600
#define VB6801_PIXELS_PER_LINE_PREVIEW 2500
#define VB6801_PIXELS_PER_LINE_SNAPSHOT 2500
/* AF constant */
#define VB6801_TOTAL_STEPS_NEAR_TO_FAR 25
#define VB6801_STEPS_NEAR_TO_CLOSEST_INF 25
/* for 30 fps preview */
#define VB6801_DEFAULT_CLOCK_RATE 12000000
/* Sensor self-test pattern selection (TEST_OFF = normal imaging). */
enum vb6801_test_mode_t {
	TEST_OFF,
	TEST_1,
	TEST_2,
	TEST_3
};

/* Output resolutions: quarter-size (preview) and full-size (snapshot). */
enum vb6801_resolution_t {
	QTR_SIZE,
	FULL_SIZE,
	INVALID_SIZE
};

/* Which register configuration to program. */
enum vb6801_setting_t {
	RES_PREVIEW,
	RES_CAPTURE
};

/* Wrapper so the driver can schedule deferred work for the sensor. */
struct vb6801_work_t {
	struct work_struct work;
};

/* Timing parameters that differ between preview and snapshot modes. */
struct sensor_dynamic_params_t {
	uint16_t preview_pixelsPerLine;
	uint16_t preview_linesPerFrame;
	uint16_t snapshot_pixelsPerLine;
	uint16_t snapshot_linesPerFrame;
	uint8_t snapshot_changed_fps;	/* nonzero if fps was overridden */
	uint32_t pclk;			/* pixel clock */
};
struct vb6801_sensor_info {
/* Sensor Configuration Input Parameters */
uint32_t ext_clk_freq_mhz;
uint32_t target_frame_rate_fps;
uint32_t target_vt_pix_clk_freq_mhz;
uint32_t sub_sampling_factor;
uint32_t analog_binning_allowed;
uint32_t raw_mode;
uint32_t capture_mode;
/* Image Readout Registers */
uint32_t x_odd_inc; /* x pixel array addressing odd increment */
uint32_t y_odd_inc; /* y pixel array addressing odd increment */
uint32_t x_output_size; /* width of output image */
uint32_t y_output_size; /* height of output image */
/* Declare data format */
uint32_t ccp2_data_format;
/* Clock Tree Registers */
uint32_t pre_pll_clk_div;
uint32_t pll_multiplier;
uint32_t vt_sys_clk_div;
uint32_t vt_pix_clk_div;
uint32_t op_sys_clk_div;
uint32_t op_pix_clk_div;
/* Video Timing Registers */
uint32_t vt_line_length_pck;
uint32_t vt_frame_length_lines;
/* Analogue Binning Registers */
uint8_t vtiming_major;
uint8_t analog_timing_modes_4;
/* Fine (pixel) Integration Time Registers */
uint32_t fine_integration_time;
/* Coarse (lines) Integration Time Limit Registers */
uint32_t coarse_integration_time_max;
/* Coarse (lines) Integration Timit Register (16-bit) */
uint32_t coarse_integration_time;
/* Analogue Gain Code Global Registers */
uint32_t analogue_gain_code_global;
/* Digital Gain Code Registers */
uint32_t digital_gain_code;
/* Overall gain (analogue & digital) code
* Note that this is not a real register but just
* an abstraction for the combination of analogue
* and digital gain */
uint32_t gain_code;
/* FMT Test Information */
uint32_t pass_fail;
uint32_t day;
uint32_t month;
uint32_t year;
uint32_t tester;
uint32_t part_number;
/* Autofocus controls */
uint32_t vcm_dac_code;
int vcm_max_dac_code_step;
int vcm_proportional_factor;
int vcm_dac_code_spacing_ms;
/* VCM NVM Characterisation Information */
uint32_t vcm_dac_code_infinity_dn;
uint32_t vcm_dac_code_macro_up;
uint32_t vcm_dac_code_up_dn_delta;
/* Internal Variables */
uint32_t min_vt_frame_length_lines;
};
struct vb6801_work_t *vb6801_sensorw;	/* deferred-work state */
struct i2c_client *vb6801_client;	/* i2c handle used by all accessors */

/* Top-level driver state for one sensor instance. */
struct vb6801_ctrl_t {
	const struct msm_camera_sensor_info *sensordata;
	int sensormode;

	uint32_t factor_fps;	/* init to 1 * 0x00000400 */
	uint16_t curr_fps;
	uint16_t max_fps;
	int8_t pict_exp_update;
	int8_t reducel;

	uint16_t curr_lens_pos;		/* current AF lens position */
	uint16_t init_curr_lens_pos;

	enum vb6801_resolution_t prev_res;	/* preview resolution */
	enum vb6801_resolution_t pict_res;	/* snapshot resolution */
	enum vb6801_resolution_t curr_res;	/* currently active res */
	enum vb6801_test_mode_t set_test;	/* requested test pattern */

	struct vb6801_sensor_info s_info;
	struct sensor_dynamic_params_t s_dynamic_params;
};
static struct vb6801_ctrl_t *vb6801_ctrl;

static DECLARE_WAIT_QUEUE_HEAD(vb6801_wait_queue);
DEFINE_MUTEX(vb6801_mut);
/* Combined i2c transfer: write a 16-bit register address (taken from
 * rxdata[0..1]), then read \a length data bytes back into rxdata.
 * Returns 0 on success, -EIO on transfer failure. */
static int vb6801_i2c_rxdata(unsigned short saddr,
			     unsigned char *rxdata, int length)
{
	struct i2c_msg msgs[] = {
		{
			/* first: send the 2-byte register address */
			.addr = saddr,
			.flags = 0,
			.len = 2,
			.buf = rxdata,
		},
		{
			/* second: read back the requested number of bytes.
			 * BUGFIX: honour the caller's length instead of the
			 * former hard-coded 2, which over-read for 1-byte
			 * (D_LEN_BYTE) register reads. */
			.addr = saddr,
			.flags = I2C_M_RD,
			.len = length,
			.buf = rxdata,
		},
	};

	if (i2c_transfer(vb6801_client->adapter, msgs, 2) < 0) {
		CDBG("vb6801_i2c_rxdata failed!\n");
		return -EIO;
	}

	return 0;
}
/* Read an 8- or 16-bit sensor register at address \a raddr.
 * \a rlen is the number of data bytes (1 or 2); a 2-byte value is
 * assembled big-endian into *rdata.  Returns 0 or a negative errno. */
static int32_t vb6801_i2c_read(unsigned short raddr,
			       unsigned short *rdata, int rlen)
{
	int32_t rc = 0;
	unsigned char buf[2];

	if (!rdata)
		return -EIO;

	memset(buf, 0, sizeof(buf));
	/* register address goes out big-endian */
	buf[0] = (raddr & 0xFF00) >> 8;
	buf[1] = (raddr & 0x00FF);

	rc = vb6801_i2c_rxdata(vb6801_client->addr, buf, rlen);
	if (rc < 0) {
		CDBG("vb6801_i2c_read 0x%x failed!\n", raddr);
		return rc;
	}

	/* 2-byte reads are big-endian; 1-byte reads use buf[0] only */
	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);

	return rc;
}
/* Read a table of registers, storing each result into the entry's
 * byte or word field according to its declared data length.
 * Stops at the first failure and returns that error code. */
static int32_t vb6801_i2c_read_table(struct vb6801_i2c_reg_conf_t *regs,
				     int items)
{
	int i;
	int32_t rc = -EFAULT;

	for (i = 0; i < items; i++) {
		/* BUGFIX: restore "&regs->..." — the source had been
		 * corrupted into "(R)s->..." by a bad character
		 * conversion, which is not valid C.
		 * NOTE(review): for D_LEN_BYTE entries this still writes
		 * through an unsigned short * aliasing a byte field —
		 * verify vb6801_i2c_reg_conf_t's layout tolerates that. */
		unsigned short *buf =
		    regs->dlen == D_LEN_BYTE ?
		    (unsigned short *)&regs->bdata :
		    (unsigned short *)&regs->wdata;

		rc = vb6801_i2c_read(regs->waddr, buf, regs->dlen + 1);
		if (rc < 0) {
			CDBG("vb6801_i2c_read_table Failed!!!\n");
			break;
		}
		regs++;
	}

	return rc;
}
static int32_t vb6801_i2c_txdata(unsigned short saddr,
unsigned char *txdata, int length)
{
struct i2c_msg msg[] = {
{
.addr = saddr,
.flags = 0,
.len = length,
.buf = txdata,
},
};
if (i2c_transfer(vb6801_client->adapter, msg, 1) < 0) {
CDBG("vb6801_i2c_txdata faild 0x%x\n", vb6801_client->addr);
return -EIO;
}
return 0;
}
/*
 * Write one byte to a 16-bit-addressed sensor register.
 * Returns 0 on success, negative errno otherwise.
 */
static int32_t vb6801_i2c_write_b(unsigned short waddr, uint8_t bdata)
{
	unsigned char buf[3];
	int32_t rc;

	/* big-endian register address, then the data byte */
	buf[0] = (unsigned char)(waddr >> 8);
	buf[1] = (unsigned char)(waddr & 0x00FF);
	buf[2] = bdata;

	CDBG("i2c_write_b addr = %d, val = %d\n", waddr, bdata);
	rc = vb6801_i2c_txdata(vb6801_client->addr, buf, 3);
	if (rc < 0)
		CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
		     waddr, bdata);
	return rc;
}
/*
 * Write one 16-bit word to a 16-bit-addressed sensor register.
 * Returns 0 on success, negative errno otherwise.
 */
static int32_t vb6801_i2c_write_w(unsigned short waddr, unsigned short wdata)
{
	unsigned char buf[4];
	int32_t rc;

	/* big-endian register address followed by big-endian data */
	buf[0] = (unsigned char)(waddr >> 8);
	buf[1] = (unsigned char)(waddr & 0x00FF);
	buf[2] = (unsigned char)(wdata >> 8);
	buf[3] = (unsigned char)(wdata & 0x00FF);

	CDBG("i2c_write_w addr = %d, val = %d, buf[2] = 0x%x, buf[3] = 0x%x\n",
	     waddr, wdata, buf[2], buf[3]);
	rc = vb6801_i2c_txdata(vb6801_client->addr, buf, 4);
	if (rc < 0)
		CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n",
		     waddr, wdata);
	return rc;
}
/*
 * Write a table of register/value pairs, choosing byte or word access
 * per entry. Stops at the first failure.
 * Returns 0 on success, negative errno otherwise.
 */
static int32_t vb6801_i2c_write_table(struct vb6801_i2c_reg_conf_t *regs,
				      int items)
{
	int32_t rc = -EFAULT;
	int i;

	for (i = 0; i < items; i++, regs++) {
		if (regs->dlen == D_LEN_BYTE)
			rc = vb6801_i2c_write_b(regs->waddr, regs->bdata);
		else
			rc = vb6801_i2c_write_w(regs->waddr, regs->wdata);
		if (rc < 0) {
			CDBG("vb6801_i2c_write_table Failed!!!\n");
			break;
		}
	}
	return rc;
}
/*
 * Claim the reset GPIO and pulse it: hold low 50ms, release, then wait
 * 13ms for the sensor to come out of reset.
 * Returns 0 on success, or the gpio_request() error.
 */
static int32_t vb6801_reset(const struct msm_camera_sensor_info *data)
{
	int rc = gpio_request(data->sensor_reset, "vb6801");

	if (rc) {
		CDBG("sensor_reset FAiled\n");
		return rc;
	}

	CDBG("sensor_reset SUcceeded\n");
	gpio_direction_output(data->sensor_reset, 0);
	mdelay(50);
	gpio_direction_output(data->sensor_reset, 1);
	mdelay(13);
	return rc;
}
/* Default-focus hook required by the sensor framework.
 * FIXME: Default focus not supported — this is a no-op returning success. */
static int32_t vb6801_set_default_focus(void)
{
	return 0;
}
/*
 * Derive the snapshot frame rate from the preview frame rate.
 * @fps:  preview fps in Q8 format
 * @pfps: out parameter, snapshot fps in Q8 format
 * Fixed-point math throughout: ratios are carried in Q10 (x * 0x400).
 */
static void vb6801_get_pict_fps(uint16_t fps, uint16_t *pfps)
{
	/* input fps is preview fps in Q8 format */
	uint32_t line_ratio;	/* Q10 */
	uint32_t pixel_ratio;	/* Q10 */
	uint32_t divider;	/* Q10 */
	uint32_t pclk_mult;	/* Q10 */

	line_ratio = (uint32_t)
	    ((vb6801_ctrl->s_dynamic_params.preview_linesPerFrame *
	      0x00000400) /
	     vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame);
	pixel_ratio = (uint32_t)
	    ((vb6801_ctrl->s_dynamic_params.preview_pixelsPerLine *
	      0x00000400) /
	     vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine);
	divider = (line_ratio * pixel_ratio) / 0x00000400;
	pclk_mult = (48 * 0x400) / 60;
	/* Verify PCLK settings and frame sizes. */
	*pfps = (uint16_t)((((fps * pclk_mult) / 0x00000400) * divider) /
			   0x00000400);
}
/* Lines per frame of the stream currently used for preview. */
static uint16_t vb6801_get_prev_lines_pf(void)
{
	return (vb6801_ctrl->prev_res == QTR_SIZE) ?
	    vb6801_ctrl->s_dynamic_params.preview_linesPerFrame :
	    vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame;
}
/* Pixels per line of the stream currently used for preview. */
static uint16_t vb6801_get_prev_pixels_pl(void)
{
	return (vb6801_ctrl->prev_res == QTR_SIZE) ?
	    vb6801_ctrl->s_dynamic_params.preview_pixelsPerLine :
	    vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine;
}
/* Lines per frame used for snapshot capture. */
static uint16_t vb6801_get_pict_lines_pf(void)
{
return vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame;
}
/* Pixels per line used for snapshot capture. */
static uint16_t vb6801_get_pict_pixels_pl(void)
{
return vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine;
}
/*
 * Maximum snapshot exposure in line counts: the frame length of the
 * selected snapshot resolution minus a 3-line margin.
 */
static uint32_t vb6801_get_pict_max_exp_lc(void)
{
	uint16_t max_lines;

	if (vb6801_ctrl->pict_res == QTR_SIZE)
		max_lines = (uint16_t)
		    (vb6801_ctrl->s_dynamic_params.preview_linesPerFrame - 3);
	else
		max_lines = (uint16_t)
		    (vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame - 3);
	return max_lines;
}
/*
 * Map the requested Q8 frame rate onto the internal frame-length
 * multiplier. Only 30, 15, 10 and 7.5 fps are supported.
 * Returns 0 on success, -ENODEV for an unsupported rate.
 */
static int32_t vb6801_set_fps(struct fps_cfg *fps)
{
	/* input is new fps in Q8 format */
	int32_t rc = 0;

	if (fps->fps_div == 7680)		/* 30 * Q8 */
		vb6801_ctrl->factor_fps = 1;
	else if (fps->fps_div == 3840)		/* 15 * Q8 */
		vb6801_ctrl->factor_fps = 2;
	else if (fps->fps_div == 2560)		/* 10 * Q8 */
		vb6801_ctrl->factor_fps = 3;
	else if (fps->fps_div == 1920)		/* 7.5 * Q8 */
		vb6801_ctrl->factor_fps = 4;
	else
		rc = -ENODEV;

	return rc;
}
/*
 * Program exposure (coarse integration time) and analogue gain.
 * @gain: analogue gain code
 * @line: integration time in line counts (pre frame-rate scaling)
 *
 * The whole update is bracketed by a grouped-parameter hold
 * (REG_HOLD / RELEASE_HOLD) so all registers latch on one frame
 * boundary. When the requested frame rate is below the maximum and no
 * snapshot exposure update is pending, the frame length and integration
 * time are stretched by factor_fps over two calls (via the 'reducel'
 * state flag) instead of being written together.
 * Returns 0 on success, negative errno on the first failed write.
 */
static int32_t vb6801_write_exp_gain(uint16_t gain, uint32_t line)
{
int32_t rc = 0;
uint16_t lpf;
/* base lines-per-frame depends on the active resolution */
if (vb6801_ctrl->curr_res == SENSOR_FULL_SIZE)
lpf = VB6801_LINES_PER_FRAME_SNAPSHOT;
else
lpf = VB6801_LINES_PER_FRAME_PREVIEW;
/* hold */
rc = vb6801_i2c_write_w(REG_HOLD, HOLD);
if (rc < 0)
goto exp_gain_done;
if ((vb6801_ctrl->curr_fps <
vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps) &&
(!vb6801_ctrl->pict_exp_update)) {
/* fps below target: alternate between stretching the frame
 * length and stretching the integration time */
if (vb6801_ctrl->reducel) {
rc = vb6801_i2c_write_w(REG_VT_FRAME_LENGTH_LINES,
lpf * vb6801_ctrl->factor_fps);
vb6801_ctrl->curr_fps =
vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps;
} else if (!vb6801_ctrl->reducel) {
rc = vb6801_i2c_write_w(REG_COARSE_INTEGRATION_TIME,
line * vb6801_ctrl->factor_fps);
vb6801_ctrl->reducel = 1;
}
} else if ((vb6801_ctrl->curr_fps >
vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps) &&
(!vb6801_ctrl->pict_exp_update)) {
/* fps above target: bring the frame length back up */
rc = vb6801_i2c_write_w(REG_VT_FRAME_LENGTH_LINES,
lpf * vb6801_ctrl->factor_fps);
vb6801_ctrl->curr_fps =
vb6801_ctrl->max_fps / vb6801_ctrl->factor_fps;
} else {
/* steady state (or snapshot): write gain and exposure */
/* analogue_gain_code_global */
rc = vb6801_i2c_write_w(REG_ANALOGUE_GAIN_CODE_GLOBAL, gain);
if (rc < 0)
goto exp_gain_done;
/* coarse_integration_time */
rc = vb6801_i2c_write_w(REG_COARSE_INTEGRATION_TIME,
line * vb6801_ctrl->factor_fps);
if (rc < 0)
goto exp_gain_done;
vb6801_ctrl->pict_exp_update = 1;
}
rc = vb6801_i2c_write_w(REG_HOLD, RELEASE_HOLD);
exp_gain_done:
return rc;
}
/* Snapshot exposure/gain: flag an immediate exposure update so the
 * common write path skips fps adjustment, then delegate to it. */
static int32_t vb6801_set_pict_exp_gain(uint16_t gain, uint32_t line)
{
vb6801_ctrl->pict_exp_update = 1;
return vb6801_write_exp_gain(gain, line);
}
/* Power down the sensor's NVM block and allow it 5ms to settle.
 * Returns the result of the register write. */
static int32_t vb6801_power_down(void)
{
	int32_t rc = vb6801_i2c_write_b(REG_NVM_PDN, 0);

	mdelay(5);
	return rc;
}
/*
 * Drive the VCM (lens actuator) to the given DAC code: clear the
 * strobe, latch the new 9-bit code, then raise the strobe.
 * Also records the new code in ps->vcm_dac_code.
 * Returns 0 on success, negative errno on the first failed write.
 *
 * NOTE(review): the strobe is cleared with a byte write but set with a
 * word write to the same register — confirm against the register map
 * whether both accesses should be the same width.
 */
static int32_t vb6801_go_to_position(uint32_t target_vcm_dac_code,
struct vb6801_sensor_info *ps)
{
/* Prior to running this function the following values must
 * be initialised in the sensor data structure, PS
 * ps->vcm_dac_code
 * ps->vcm_max_dac_code_step
 * ps->vcm_dac_code_spacing_ms */
int32_t rc = 0;
ps->vcm_dac_code = target_vcm_dac_code;
/* Restore Strobe to zero state */
rc = vb6801_i2c_write_b(REG_VCM_DAC_STROBE, 0x00);
if (rc < 0)
return rc;
/* Write 9-bit VCM DAC Code */
rc = vb6801_i2c_write_w(REG_VCM_DAC_CODE, ps->vcm_dac_code);
if (rc < 0)
return rc;
/* Generate a rising edge on the dac_strobe to latch
 * new DAC value */
rc = vb6801_i2c_write_w(REG_VCM_DAC_STROBE, 0x01);
return rc;
}
/*
 * Move the lens num_steps focus steps toward (MOVE_NEAR) or away from
 * (MOVE_FAR) the macro position. The move is split into 4 sub-moves
 * with a 5ms damping delay between them.
 *
 * Fixes:
 *  - the two if-branches computing small_move[i] were byte-identical;
 *    collapsed into one unconditional statement.
 *  - the 0..511 clamp was applied to a value that was overwritten
 *    before use (the damping loop reassigns next_position unclamped);
 *    the clamp now guards every DAC code actually sent, keeping it in
 *    the 9-bit range vb6801_go_to_position() documents.
 *
 * Returns 0 on success, -EINVAL for bad arguments, or the first
 * vb6801_go_to_position() error.
 */
static int32_t vb6801_move_focus(int direction, int32_t num_steps)
{
	int16_t step_direction;
	int16_t actual_step;
	int16_t next_position;
	uint32_t step_size;
	int16_t small_move[4];
	uint16_t i;
	int32_t rc = 0;

	/* NOTE(review): step_size is computed but never used — kept for
	 * parity with the original; confirm whether it was intended to
	 * scale the move. */
	step_size = (vb6801_ctrl->s_info.vcm_dac_code_macro_up -
		     vb6801_ctrl->s_info.vcm_dac_code_infinity_dn) /
	    VB6801_TOTAL_STEPS_NEAR_TO_FAR;

	if (num_steps > VB6801_TOTAL_STEPS_NEAR_TO_FAR)
		num_steps = VB6801_TOTAL_STEPS_NEAR_TO_FAR;
	else if (num_steps == 0)
		return -EINVAL;

	if (direction == MOVE_NEAR)
		step_direction = 4;
	else if (direction == MOVE_FAR)
		step_direction = -4;
	else
		return -EINVAL;

	/* need to decide about default position and power supplied
	 * at start up and reset */
	if (vb6801_ctrl->curr_lens_pos < vb6801_ctrl->init_curr_lens_pos)
		vb6801_ctrl->curr_lens_pos = vb6801_ctrl->init_curr_lens_pos;

	actual_step = (step_direction * num_steps);

	/* split the total move into 4 roughly equal sub-moves whose sum
	 * is exactly actual_step */
	for (i = 0; i < 4; i++)
		small_move[i] =
		    (i + 1) * actual_step / 4 - i * actual_step / 4;

	/* for damping */
	for (i = 0; i < 4; i++) {
		next_position =
		    (int16_t) (vb6801_ctrl->curr_lens_pos + small_move[i]);
		/* clamp to the 9-bit DAC code range */
		if (next_position > 511)
			next_position = 511;
		else if (next_position < 0)
			next_position = 0;
		/* Writing the digital code for current to the actuator */
		CDBG("next_position in damping mode = %d\n", next_position);
		rc = vb6801_go_to_position(next_position, &vb6801_ctrl->s_info);
		if (rc < 0) {
			CDBG("go_to_position Failed!!!\n");
			return rc;
		}
		vb6801_ctrl->curr_lens_pos = next_position;
		if (i < 3)
			mdelay(5);
	}
	return rc;
}
/*
 * Read the sensor's one-time-programmed NVM pages and decode the
 * factory (FMT) data and VCM/autofocus calibration values into *ps.
 * Powers the NVM up for the read and powers it back down afterwards.
 * Returns 0 on success, negative errno on I2C failure.
 */
static int vb6801_read_nvm_data(struct vb6801_sensor_info *ps)
{
/* +--------+------+------+----------------+---------------+
 * | Index  | NVM  | NVM  | Name           | Description   |
 * |        | Addr | Byte |                |               |
 * +--------+------+------+----------------+---------------+
 * | 0x3600 | 0 | 3 | nvm_t1_addr_00 | {PF[2:0]:Day[4:0]} |
 * | 0x3601 | 0 | 2 | nvm_t1_addr_01 | {Month[3:0]:Year[3:0]} |
 * | 0x3602 | 0 | 1 | nvm_t1_addr_02 | Tester[7:0] |
 * | 0x3603 | 0 | 0 | nvm_t1_addr_03 | Part[15:8] |
 * +--------+------+------+----------------+---------------+
 * | 0x3604 | 1 | 3 | nvm_t1_addr_04 | Part[7:0] |
 * | 0x3605 | 1 | 2 | nvm_t1_addr_05 | StartWPM[7:0] |
 * | 0x3606 | 1 | 1 | nvm_t1_addr_06 | Infinity[7:0] |
 * | 0x3607 | 1 | 0 | nvm_t1_addr_07 | Macro[7:0] |
 * +--------+------+------+----------------+---------------+
 * | 0x3608 | 2 | 3 | nvm_t1_addr_08 | Reserved |
 * | 0x3609 | 2 | 2 | nvm_t1_addr_09 | Reserved |
 * | 0x360A | 2 | 1 | nvm_t1_addr_0A | UpDown[7:0] |
 * | 0x360B | 2 | 0 | nvm_t1_addr_0B | Reserved |
 * +--------+------+------+----------------+---------------+
 * | 0x360C | 3 | 3 | nvm_t1_addr_0C | Reserved |
 * | 0x360D | 3 | 2 | nvm_t1_addr_0D | Reserved |
 * | 0x360E | 3 | 1 | nvm_t1_addr_0E | Reserved |
 * | 0x360F | 3 | 0 | nvm_t1_addr_0F | Reserved |
 * +--------+------+------+----------------+---------------+
 * | 0x3610 | 4 | 3 | nvm_t1_addr_10 | Reserved |
 * | 0x3611 | 4 | 2 | nvm_t1_addr_11 | Reserved |
 * | 0x3612 | 4 | 1 | nvm_t1_addr_12 | Reserved |
 * | 0x3613 | 4 | 0 | nvm_t1_addr_13 | Reserved |
 * +--------+------+------+----------------+---------------+*/
int32_t rc;
/* one table entry per NVM byte, filled in by the read below */
struct vb6801_i2c_reg_conf_t rreg[] = {
{REG_NVM_T1_ADDR_00, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_01, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_02, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_03, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_04, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_05, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_06, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_07, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_08, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_09, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_0A, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_0B, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_0C, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_0D, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_0E, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_0F, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_10, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_11, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_12, 0, 0, D_LEN_BYTE},
{REG_NVM_T1_ADDR_13, 0, 0, D_LEN_BYTE},
};
struct vb6801_i2c_reg_conf_t wreg[] = {
/* Enable NVM for Direct Reading */
{REG_NVM_CTRL, 0, 2, D_LEN_BYTE},
/* Power up NVM */
{REG_NVM_PDN, 0, 1, D_LEN_BYTE},
};
rc = vb6801_i2c_write_table(wreg, ARRAY_SIZE(wreg));
if (rc < 0) {
CDBG("I2C Write Table FAILED!!!\n");
return rc;
}
/* NVM Read Pulse Width
 * ====================
 * nvm_pulse_width_us = nvm_pulse_width_ext_clk / ext_clk_freq_mhz
 * Valid Range for Read Pulse Width = 400ns -> 3.0us
 * Min ext_clk_freq_mhz = 6MHz => 3.0 * 6 = 18
 * Max ext_clk_freq_mhz = 27MHz => 0.4 * 27 = 10.8
 * Choose 15 as a common value
 * - 15 / 6.0 = 2.5000us
 * - 15 / 12.0 = 1.2500us
 * - 15 / 27.0 = 0.5555us */
rc = vb6801_i2c_write_w(REG_NVM_PULSE_WIDTH, 15);
if (rc < 0) {
rc = -EBUSY;
goto nv_shutdown;
}
rc = vb6801_i2c_read_table(rreg, ARRAY_SIZE(rreg));
if (rc < 0) {
CDBG("I2C Read Table FAILED!!!\n");
rc = -EBUSY;
goto nv_shutdown;
}
/* Decode and Save FMT Info */
ps->pass_fail = (rreg[0].bdata & 0x00E0) >> 5;
ps->day = (rreg[0].bdata & 0x001F);
ps->month = (rreg[1].bdata & 0x00F0) >> 4;
ps->year = (rreg[1].bdata & 0x000F) + 2000;
ps->tester = rreg[2].bdata;
ps->part_number = (rreg[3].bdata << 8) + rreg[4].bdata;
/* Decode and Save VCM Dac Values in data structure */
ps->vcm_dac_code_infinity_dn = rreg[6].bdata;
/* macro value is stored halved in NVM, hence the shift */
ps->vcm_dac_code_macro_up = rreg[7].bdata << 1;
ps->vcm_dac_code_up_dn_delta = rreg[10].bdata;
nv_shutdown:
/* Power Down NVM to extend life time */
rc = vb6801_i2c_write_b(REG_NVM_PDN, 0);
return rc;
}
/*
 * Compute the full clock tree / frame timing for the requested mode,
 * program the sensor and bring it into streaming mode.
 *
 * Fix: rc was declared uint32_t, so every "if (rc < 0)" comparison was
 * always false (unsigned values are never negative) and all error
 * paths in this function were dead. rc is now int32_t, matching the
 * signed return values of the I2C helpers.
 *
 * Returns 0 / last read result on success, negative errno on failure.
 */
static int vb6801_config_sensor(int32_t ext_clk_freq_mhz,
				int32_t target_frame_rate_fps,
				int32_t target_vt_pix_clk_freq_mhz,
				uint32_t sub_sampling_factor,
				uint32_t analog_binning_allowed,
				uint32_t raw_mode, int capture_mode,
				enum vb6801_resolution_t res)
{
	int32_t rc;	/* must be signed: I2C helpers return negative errno */
	/* ext_clk_freq_mhz = 6.0 -> 27.0 MHz
	 * target_frame_rate_fps = 15 fps
	 * target_vt_pix_clk_freq_mhz = 24.0 -> 64.0MHz
	 * sub_sampling factor = 1, 2, 3, or 4
	 * raw_mode factor = 10
	 *
	 * capture_mode, 0 = CCP1
	 * capture_mode, 1 = CCP2
	 * capture_mode, 2 = 10-bit parallel + hsync + vsync */
	/* Declare data format */
	uint32_t ccp2_data_format = 0x0A0A;
	/* Declare clock tree variables */
	int32_t min_pll_ip_freq_mhz = 6;
	int32_t max_pll_op_freq_mhz = 640;
	uint32_t pre_pll_clk_div = 1;
	int32_t pll_ip_freq_mhz = 6;
	uint32_t pll_multiplier = 100;
	int32_t pll_op_freq_mhz = 600;
	uint32_t vt_sys_clk_div = 1;
	int32_t vt_sys_clk_freq_mhz = 600;
	uint32_t vt_pix_clk_div = 10;
	int32_t vt_pix_clk_freq_mhz = 60;
	uint32_t op_sys_clk_div = 1;
	int32_t op_sys_clk_freq_mhz = 60;
	uint32_t op_pix_clk_div = 10;
	int32_t op_pix_clk_freq_mhz = 60;
	/* Declare pixel array and frame timing variables */
	uint32_t x_pixel_array = 2064;
	uint32_t y_pixel_array = 1544;
	uint32_t x_even_inc = 1;
	uint32_t x_odd_inc = 1;
	uint32_t y_even_inc = 1;
	uint32_t y_odd_inc = 1;
	uint32_t x_output_size = 2064;
	uint32_t y_output_size = 1544;
	uint32_t additional_rows = 2;
	uint32_t min_vt_frame_blanking_lines = 16;
	uint32_t vt_line_length_pck = 2500;
	uint32_t vt_line_length_us = 0;
	uint32_t min_vt_frame_length_lines = 1562;
	uint32_t vt_frame_length_lines = 1600;
	uint32_t target_vt_frame_length_ms;	/* 200 * 0x0001000 / 3; */
	uint32_t vt_frame_length_ms;	/* 200 * 0x0001000 / 3; */
	uint32_t frame_rate_fps = 15;
	/* Coarse intergration time */
	uint32_t coarse_integration_time = 1597;
	uint32_t coarse_integration_time_max_margin = 3;
	uint16_t frame_count;
	int timeout;
	struct vb6801_sensor_info *pinfo = &vb6801_ctrl->s_info;
	struct vb6801_i2c_reg_conf_t rreg[] = {
		{REG_PRE_PLL_CLK_DIV, 0, 0, D_LEN_WORD},
		{REG_PLL_MULTIPLIER, 0, 0, D_LEN_WORD},
		{REG_VT_SYS_CLK_DIV, 0, 0, D_LEN_WORD},
		{REG_VT_PIX_CLK_DIV, 0, 0, D_LEN_WORD},
		{REG_OP_SYS_CLK_DIV, 0, 0, D_LEN_WORD},
		{REG_OP_PIX_CLK_DIV, 0, 0, D_LEN_WORD},
		{REG_FRAME_COUNT, 0, 0, D_LEN_BYTE},
	};
	struct vb6801_i2c_reg_conf_t wreg2[] = {
		{REG_POWER_MAN_ENABLE_3, 0, 95, D_LEN_BYTE},
		{REG_POWER_MAN_ENABLE_4, 0, 142, D_LEN_BYTE},
		{REG_POWER_MAN_ENABLE_5, 0, 7, D_LEN_BYTE},
	};
	/* VIDEO TIMING CALCULATIONS
	 * ========================= */
	/* Pixel Array Size */
	x_pixel_array = 2064;
	y_pixel_array = 1544;
	/* set current resolution */
	vb6801_ctrl->curr_res = res;
	/* Analogue binning setup
	 * NOTE(review): this tests pinfo->analog_binning_allowed and
	 * pinfo->sub_sampling_factor from the NVM info struct, not the
	 * same-named function parameters — confirm that is intended. */
	if (pinfo->analog_binning_allowed > 0 &&
	    pinfo->sub_sampling_factor == 4) {
		pinfo->vtiming_major = 1;
		pinfo->analog_timing_modes_4 = 32;
	} else if (pinfo->analog_binning_allowed > 0 &&
		   pinfo->sub_sampling_factor == 2) {
		pinfo->vtiming_major = 1;
		pinfo->analog_timing_modes_4 = 0;
	} else {
		pinfo->vtiming_major = 0;
		pinfo->analog_timing_modes_4 = 0;
	}
	/* Sub-Sampling X & Y Odd Increments: valid values 1, 3, 5, 7 */
	x_even_inc = 1;
	y_even_inc = 1;
	x_odd_inc = (sub_sampling_factor << 1) - x_even_inc;
	y_odd_inc = (sub_sampling_factor << 1) - y_even_inc;
	/* Output image size
	 * Must always be a multiple of 2 - round down */
	x_output_size = ((x_pixel_array / sub_sampling_factor) >> 1) << 1;
	y_output_size = ((y_pixel_array / sub_sampling_factor) >> 1) << 1;
	/* Output data format */
	ccp2_data_format = (raw_mode << 8) + raw_mode;
	/* Pre PLL clock divider : valid values 1, 2 or 4
	 * The 1st step is to ensure that PLL input frequency is as close
	 * as possible to the min allowed PLL input frequency.
	 * This yields the smallest step size in the PLL output frequency. */
	pre_pll_clk_div =
	    ((int)(ext_clk_freq_mhz / min_pll_ip_freq_mhz) >> 1) << 1;
	if (pre_pll_clk_div < 2)
		pre_pll_clk_div = 1;
	pll_ip_freq_mhz = ext_clk_freq_mhz / pre_pll_clk_div;
	/* Video Timing System Clock divider: valid values 1, 2, 4
	 * Now need to work backwards through the clock tree to determine the
	 * 1st pass estimates for vt_sys_clk_freq_mhz and then the PLL output
	 * frequency.*/
	vt_sys_clk_freq_mhz = vt_pix_clk_div * target_vt_pix_clk_freq_mhz;
	vt_sys_clk_div = max_pll_op_freq_mhz / vt_sys_clk_freq_mhz;
	if (vt_sys_clk_div < 2)
		vt_sys_clk_div = 1;
	/* PLL Mulitplier: min , max 106 */
	pll_op_freq_mhz = vt_sys_clk_div * vt_sys_clk_freq_mhz;
	pll_multiplier = (pll_op_freq_mhz * 0x0001000) / pll_ip_freq_mhz;
	/* Calculate the acutal pll output frequency
	 * - the pll_multiplier calculation introduces a quantisation error
	 * due the integer nature of the pll multiplier */
	pll_op_freq_mhz = (pll_ip_freq_mhz * pll_multiplier) / 0x0001000;
	/* Re-calculate video timing clock frequencies based
	 * on actual PLL freq */
	vt_sys_clk_freq_mhz = pll_op_freq_mhz / vt_sys_clk_div;
	vt_pix_clk_freq_mhz = ((vt_sys_clk_freq_mhz * 0x0001000) /
			       vt_pix_clk_div) / 0x0001000;
	/* Output System Clock Divider: valid value 1, 2, 4, 6, 8
	 * op_sys_clk_div = vt_sys_clk_div;*/
	op_sys_clk_div = (vt_sys_clk_div * sub_sampling_factor);
	if (op_sys_clk_div < 2)
		op_sys_clk_div = 1;
	/* Calculate output timing clock frequencies */
	op_sys_clk_freq_mhz = pll_op_freq_mhz / op_sys_clk_div;
	op_pix_clk_freq_mhz =
	    (op_sys_clk_freq_mhz * 0x0001000) / (op_pix_clk_div * 0x0001000);
	/* Line length in pixels and us */
	vt_line_length_pck = 2500;
	vt_line_length_us =
	    vt_line_length_pck * 0x0001000 / vt_pix_clk_freq_mhz;
	/* Target vt_frame_length_ms */
	target_vt_frame_length_ms = (1000 * 0x0001000 / target_frame_rate_fps);
	/* Frame length in lines */
	min_vt_frame_length_lines =
	    additional_rows + y_output_size + min_vt_frame_blanking_lines;
	vt_frame_length_lines =
	    ((1000 * target_vt_frame_length_ms) / vt_line_length_us);
	if (vt_frame_length_lines <= min_vt_frame_length_lines)
		vt_frame_length_lines = min_vt_frame_length_lines;
	/* Calcuate the actual frame length in ms */
	vt_frame_length_ms = (vt_frame_length_lines * vt_line_length_us / 1000);
	/* Frame Rate in fps */
	frame_rate_fps = (1000 * 0x0001000 / vt_frame_length_ms);
	/* Set coarse integration to max */
	coarse_integration_time =
	    vt_frame_length_lines - coarse_integration_time_max_margin;
	CDBG("SENSOR VIDEO TIMING SUMMARY:\n");
	CDBG(" ============================\n");
	CDBG("ext_clk_freq_mhz = %d\n", ext_clk_freq_mhz);
	CDBG("pre_pll_clk_div = %d\n", pre_pll_clk_div);
	CDBG("pll_ip_freq_mhz = %d\n", pll_ip_freq_mhz);
	CDBG("pll_multiplier = %d\n", pll_multiplier);
	CDBG("pll_op_freq_mhz = %d\n", pll_op_freq_mhz);
	CDBG("vt_sys_clk_div = %d\n", vt_sys_clk_div);
	CDBG("vt_sys_clk_freq_mhz = %d\n", vt_sys_clk_freq_mhz);
	CDBG("vt_pix_clk_div = %d\n", vt_pix_clk_div);
	CDBG("vt_pix_clk_freq_mhz = %d\n", vt_pix_clk_freq_mhz);
	CDBG("op_sys_clk_div = %d\n", op_sys_clk_div);
	CDBG("op_sys_clk_freq_mhz = %d\n", op_sys_clk_freq_mhz);
	CDBG("op_pix_clk_div = %d\n", op_pix_clk_div);
	CDBG("op_pix_clk_freq_mhz = %d\n", op_pix_clk_freq_mhz);
	CDBG("vt_line_length_pck = %d\n", vt_line_length_pck);
	CDBG("vt_line_length_us = %d\n", vt_line_length_us / 0x0001000);
	CDBG("vt_frame_length_lines = %d\n", vt_frame_length_lines);
	CDBG("vt_frame_length_ms = %d\n", vt_frame_length_ms / 0x0001000);
	CDBG("frame_rate_fps = %d\n", frame_rate_fps);
	CDBG("ccp2_data_format = %d\n", ccp2_data_format);
	CDBG("x_output_size = %d\n", x_output_size);
	CDBG("y_output_size = %d\n", y_output_size);
	CDBG("x_odd_inc = %d\n", x_odd_inc);
	CDBG("y_odd_inc = %d\n", y_odd_inc);
	CDBG("(vt_frame_length_lines * frame_rate_factor ) = %d\n",
	     (vt_frame_length_lines * vb6801_ctrl->factor_fps));
	CDBG("coarse_integration_time = %d\n", coarse_integration_time);
	CDBG("pinfo->vcm_dac_code = %d\n", pinfo->vcm_dac_code);
	CDBG("capture_mode = %d\n", capture_mode);
	/* RE-CONFIGURE SENSOR WITH NEW TIMINGS
	 * ====================================
	 * Enter Software Standby Mode */
	rc = vb6801_i2c_write_b(REG_MODE_SELECT, 0);
	if (rc < 0) {
		CDBG("I2C vb6801_i2c_write_b FAILED!!!\n");
		return rc;
	}
	/* Wait 100ms */
	mdelay(100);
	if (capture_mode == 0) {
		rc = vb6801_i2c_write_b(REG_CCP2_CHANNEL_IDENTIFIER, 0);
		rc = vb6801_i2c_write_b(REG_CCP2_SIGNALLING_MODE, 0);
	} else if (capture_mode == 1) {
		rc = vb6801_i2c_write_b(REG_CCP2_CHANNEL_IDENTIFIER, 0);
		rc = vb6801_i2c_write_b(REG_CCP2_SIGNALLING_MODE, 1);
	}
	{
		struct vb6801_i2c_reg_conf_t wreg[] = {
			/* Re-configure Sensor */
			{REG_CCP2_DATA_FORMAT, ccp2_data_format, 0,
			 D_LEN_WORD},
			{REG_ANALOGUE_GAIN_CODE_GLOBAL, 128, 0, D_LEN_WORD},
			{REG_PRE_PLL_CLK_DIV, pre_pll_clk_div, 0, D_LEN_WORD},
			{REG_VT_SYS_CLK_DIV, vt_sys_clk_div, 0, D_LEN_WORD},
			{REG_VT_PIX_CLK_DIV, vt_pix_clk_div, 0, D_LEN_WORD},
			{REG_OP_SYS_CLK_DIV, vt_sys_clk_div, 0, D_LEN_WORD},
			{REG_OP_PIX_CLK_DIV, vt_pix_clk_div, 0, D_LEN_WORD},
			{REG_VT_LINE_LENGTH_PCK, vt_line_length_pck, 0,
			 D_LEN_WORD},
			{REG_X_OUTPUT_SIZE, x_output_size, 0, D_LEN_WORD},
			{REG_Y_OUTPUT_SIZE, y_output_size, 0, D_LEN_WORD},
			{REG_X_ODD_INC, x_odd_inc, 0, D_LEN_WORD},
			{REG_Y_ODD_INC, y_odd_inc, 0, D_LEN_WORD},
			{REG_VT_FRAME_LENGTH_LINES,
			 vt_frame_length_lines * vb6801_ctrl->factor_fps, 0,
			 D_LEN_WORD},
			{REG_COARSE_INTEGRATION_TIME,
			 coarse_integration_time, 0, D_LEN_WORD},
			/* Analogue Settings */
			{REG_ANALOG_TIMING_MODES_2, 0, 132, D_LEN_BYTE},
			{REG_RAMP_SCALE, 0, 5, D_LEN_BYTE},
			{REG_BTL_LEVEL_SETUP, 0, 11, D_LEN_BYTE},
			/* Enable Defect Correction */
			{REG_SCYTHE_ENABLE, 0, 1, D_LEN_BYTE},
			{REG_SCYTHE_WEIGHT, 0, 16, D_LEN_BYTE},
			{REG_BRUCE_ENABLE, 0, 1, D_LEN_BYTE},
			/* Auto Focus Configuration
			 * Please note that the DAC Code is a written as a
			 * 16-bit value 0 = infinity (no DAC current) */
			{REG_VCM_DAC_CODE, pinfo->vcm_dac_code, 0, D_LEN_WORD},
			{REG_VCM_DAC_STROBE, 0, 0, D_LEN_BYTE},
			{REG_VCM_DAC_ENABLE, 0, 1, D_LEN_BYTE},
		};
		rc = vb6801_i2c_write_table(wreg, ARRAY_SIZE(wreg));
		if (rc < 0) {
			CDBG("I2C Write Table FAILED!!!\n");
			return rc;
		}
	}
	/* Parallel Interface Configuration */
	if (capture_mode >= 2) {
		struct vb6801_i2c_reg_conf_t wreg1[] = {
			{REG_OP_CODER_SYNC_CLK_SETUP, 0, 15, D_LEN_BYTE},
			{REG_OP_CODER_ENABLE, 0, 3, D_LEN_BYTE},
			{REG_OP_CODER_SLOW_PAD_EN, 0, 1, D_LEN_BYTE},
			{REG_OP_CODER_AUTOMATIC_MODE_ENABLE, 0, 3, D_LEN_BYTE},
			{REG_OP_CODER_AUTO_STARTUP, 0, 2, D_LEN_BYTE},
		};
		rc = vb6801_i2c_write_table(wreg1, ARRAY_SIZE(wreg1));
		if (rc < 0) {
			CDBG("I2C Write Table FAILED!!!\n");
			return rc;
		}
	}
	/* Enter Streaming Mode */
	rc = vb6801_i2c_write_b(REG_MODE_SELECT, 1);
	if (rc < 0) {
		CDBG("I2C Write Table FAILED!!!\n");
		return rc;
	}
	/* Wait until the sensor starts streaming
	 * Poll until the reported frame_count value is != 0xFF */
	frame_count = 0xFF;
	timeout = 2000;
	while (frame_count == 0xFF && timeout > 0) {
		rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
		if (rc < 0)
			return rc;
		CDBG("REG_FRAME_COUNT = 0x%x\n", frame_count);
		timeout--;
	}
	/* Post Streaming Configuration */
	rc = vb6801_i2c_write_table(wreg2, ARRAY_SIZE(wreg2));
	if (rc < 0) {
		CDBG("I2C Write Table FAILED!!!\n");
		return rc;
	}
	rc = vb6801_i2c_read_table(rreg, ARRAY_SIZE(rreg));
	if (rc < 0) {
		CDBG("I2C Read Table FAILED!!!\n");
		return rc;
	}
	CDBG("REG_PRE_PLL_CLK_DIV = 0x%x\n", rreg[0].wdata);
	CDBG("REG_PLL_MULTIPLIER = 0x%x\n", rreg[1].wdata);
	CDBG("REG_VT_SYS_CLK_DIV = 0x%x\n", rreg[2].wdata);
	CDBG("REG_VT_PIX_CLK_DIV = 0x%x\n", rreg[3].wdata);
	CDBG("REG_OP_SYS_CLK_DIV = 0x%x\n", rreg[4].wdata);
	CDBG("REG_OP_PIX_CLK_DIV = 0x%x\n", rreg[5].wdata);
	CDBG("REG_FRAME_COUNT = 0x%x\n", rreg[6].bdata);
	mdelay(50);
	frame_count = 0;
	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
	CDBG("REG_FRAME_COUNT1 = 0x%x\n", frame_count);
	mdelay(150);
	frame_count = 0;
	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
	CDBG("REG_FRAME_COUNT2 = 0x%x\n", frame_count);
	mdelay(100);
	frame_count = 0;
	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
	CDBG("REG_FRAME_COUNT3 = 0x%x\n", frame_count);
	mdelay(250);
	frame_count = 0;
	rc = vb6801_i2c_read(REG_FRAME_COUNT, &frame_count, 1);
	CDBG("REG_FRAME_COUNT4 = 0x%x\n", frame_count);
	return rc;
}
/* Counterpart of vb6801_reset(): drive the reset line low again and
 * release the GPIO. Always returns 0. */
static int vb6801_sensor_init_done(const struct msm_camera_sensor_info *data)
{
gpio_direction_output(data->sensor_reset, 0);
gpio_free(data->sensor_reset);
return 0;
}
/* One-time per-client initialisation for the I2C probe path. */
static int vb6801_init_client(struct i2c_client *client)
{
/* Initialize the MSM_CAMI2C Chip */
init_waitqueue_head(&vb6801_wait_queue);
return 0;
}
static int32_t vb6801_video_config(int mode, int res)
{
int32_t rc = 0;
vb6801_ctrl->prev_res = res;
vb6801_ctrl->curr_res = res;
vb6801_ctrl->sensormode = mode;
rc = vb6801_config_sensor(12, 30, 60, 2, 1, 10, 2, RES_PREVIEW);
if (rc < 0)
return rc;
rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
&vb6801_ctrl->s_dynamic_params.
preview_pixelsPerLine, 2);
if (rc < 0)
return rc;
rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
&vb6801_ctrl->s_dynamic_params.
preview_linesPerFrame, 2);
return rc;
}
static int32_t vb6801_snapshot_config(int mode, int res)
{
int32_t rc = 0;
vb6801_ctrl->curr_res = vb6801_ctrl->pict_res;
vb6801_ctrl->sensormode = mode;
rc = vb6801_config_sensor(12, 12, 48, 1, 1, 10, 2, RES_CAPTURE);
if (rc < 0)
return rc;
rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
&vb6801_ctrl->s_dynamic_params.
snapshot_pixelsPerLine, 2);
if (rc < 0)
return rc;
rc = vb6801_i2c_read(REG_VT_LINE_LENGTH_PCK,
&vb6801_ctrl->s_dynamic_params.
snapshot_linesPerFrame, 2);
return rc;
}
/* Dispatch a mode change to the preview or snapshot configuration
 * path. Returns -EINVAL for an unknown mode. */
static int32_t vb6801_set_sensor_mode(int mode, int res)
{
	if (mode == SENSOR_PREVIEW_MODE)
		return vb6801_video_config(mode, res);

	if (mode == SENSOR_SNAPSHOT_MODE || mode == SENSOR_RAW_SNAPSHOT_MODE)
		return vb6801_snapshot_config(mode, res);

	return -EINVAL;
}
/*
 * ioctl-style entry point from the MSM camera framework. Copies the
 * request in from userspace, dispatches on cfgtype under vb6801_mut,
 * and for "get" requests copies the filled structure back out.
 * Returns 0 on success, -EFAULT on copy failures or unknown cfgtype,
 * or the handler's error code.
 */
int vb6801_sensor_config(void __user *argp)
{
struct sensor_cfg_data cdata;
long rc = 0;
if (copy_from_user(&cdata,
(void *)argp, sizeof(struct sensor_cfg_data)))
return -EFAULT;
mutex_lock(&vb6801_mut);
CDBG("vb6801_sensor_config, cfgtype = %d\n", cdata.cfgtype);
switch (cdata.cfgtype) {
case CFG_GET_PICT_FPS:
vb6801_get_pict_fps(cdata.cfg.gfps.prevfps,
&(cdata.cfg.gfps.pictfps));
if (copy_to_user((void *)argp,
&cdata, sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PREV_L_PF:
cdata.cfg.prevl_pf = vb6801_get_prev_lines_pf();
if (copy_to_user((void *)argp,
&cdata, sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PREV_P_PL:
cdata.cfg.prevp_pl = vb6801_get_prev_pixels_pl();
if (copy_to_user((void *)argp,
&cdata, sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PICT_L_PF:
cdata.cfg.pictl_pf = vb6801_get_pict_lines_pf();
if (copy_to_user((void *)argp,
&cdata, sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PICT_P_PL:
cdata.cfg.pictp_pl = vb6801_get_pict_pixels_pl();
if (copy_to_user((void *)argp,
&cdata, sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
case CFG_GET_PICT_MAX_EXP_LC:
cdata.cfg.pict_max_exp_lc = vb6801_get_pict_max_exp_lc();
if (copy_to_user((void *)argp,
&cdata, sizeof(struct sensor_cfg_data)))
rc = -EFAULT;
break;
/* preview and snapshot fps share one handler */
case CFG_SET_FPS:
case CFG_SET_PICT_FPS:
rc = vb6801_set_fps(&(cdata.cfg.fps));
break;
case CFG_SET_EXP_GAIN:
rc = vb6801_write_exp_gain(cdata.cfg.exp_gain.gain,
cdata.cfg.exp_gain.line);
break;
case CFG_SET_PICT_EXP_GAIN:
rc = vb6801_set_pict_exp_gain(cdata.cfg.exp_gain.gain,
cdata.cfg.exp_gain.line);
break;
case CFG_SET_MODE:
rc = vb6801_set_sensor_mode(cdata.mode, cdata.rs);
break;
case CFG_PWR_DOWN:
rc = vb6801_power_down();
break;
case CFG_MOVE_FOCUS:
rc = vb6801_move_focus(cdata.cfg.focus.dir,
cdata.cfg.focus.steps);
break;
case CFG_SET_DEFAULT_FOCUS:
rc = vb6801_set_default_focus();
break;
default:
rc = -EFAULT;
break;
}
mutex_unlock(&vb6801_mut);
return rc;
}
static int vb6801_sensor_release(void)
{
int rc = -EBADF;
mutex_lock(&vb6801_mut);
vb6801_power_down();
vb6801_sensor_init_done(vb6801_ctrl->sensordata);
kfree(vb6801_ctrl);
mutex_unlock(&vb6801_mut);
return rc;
}
/*
 * I2C probe: allocate per-client work state and record the client for
 * the file-scope helpers. Returns 0 on success, negative errno on
 * failure (allocation is freed on the error path).
 */
static int vb6801_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int rc = 0;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
rc = -ENOTSUPP;
goto probe_failure;
}
vb6801_sensorw = kzalloc(sizeof(struct vb6801_work_t), GFP_KERNEL);
if (!vb6801_sensorw) {
rc = -ENOMEM;
goto probe_failure;
}
i2c_set_clientdata(client, vb6801_sensorw);
vb6801_init_client(client);
vb6801_client = client;
/* board file supplies an 8-bit address; convert to 7-bit */
vb6801_client->addr = vb6801_client->addr >> 1;
return 0;
probe_failure:
if (vb6801_sensorw != NULL) {
kfree(vb6801_sensorw);
vb6801_sensorw = NULL;
}
return rc;
}
/* I2C remove: release the IRQ (if any) and the work state allocated in
 * probe, and clear the cached client pointer. */
static int __exit vb6801_i2c_remove(struct i2c_client *client)
{
struct vb6801_work_t *sensorw = i2c_get_clientdata(client);
free_irq(client->irq, sensorw);
vb6801_client = NULL;
kfree(sensorw);
return 0;
}
static const struct i2c_device_id vb6801_i2c_id[] = {
{"vb6801", 0},
{}
};
static struct i2c_driver vb6801_i2c_driver = {
.id_table = vb6801_i2c_id,
.probe = vb6801_i2c_probe,
.remove = __exit_p(vb6801_i2c_remove),
.driver = {
.name = "vb6801",
},
};
/*
 * Probe-time presence check: pulse reset, read the two model-ID bytes
 * at registers 0x0000/0x0001 and compare against the VB6801 ID
 * (0x0353). On ID mismatch or read failure the reset GPIO is released.
 * Returns 0 on success, negative errno otherwise.
 *
 * NOTE(review): on ID mismatch rc still holds the (successful) read
 * result, so the caller sees success — confirm whether a negative
 * code was intended there.
 */
static int vb6801_probe_init_sensor(const struct msm_camera_sensor_info *data)
{
int rc;
struct vb6801_i2c_reg_conf_t rreg[] = {
{0x0000, 0, 0, D_LEN_BYTE},
{0x0001, 0, 0, D_LEN_BYTE},
};
rc = vb6801_reset(data);
if (rc < 0)
goto init_probe_done;
mdelay(20);
rc = vb6801_i2c_read_table(rreg, ARRAY_SIZE(rreg));
if (rc < 0) {
CDBG("I2C Read Table FAILED!!!\n");
goto init_probe_fail;
}
/* 4. Compare sensor ID to VB6801 ID: */
if (rreg[0].bdata != 0x03 || rreg[1].bdata != 0x53) {
CDBG("vb6801_sensor_init: sensor ID don't match!\n");
goto init_probe_fail;
}
goto init_probe_done;
init_probe_fail:
vb6801_sensor_init_done(data);
init_probe_done:
return rc;
}
int vb6801_sensor_open_init(const struct msm_camera_sensor_info *data)
{
int32_t rc;
struct vb6801_i2c_reg_conf_t wreg[] = {
{REG_MODE_SELECT, 0, STANDBY_MODE, D_LEN_BYTE},
{0x0113, 0, 0x0A, D_LEN_BYTE},
};
vb6801_ctrl = kzalloc(sizeof(struct vb6801_ctrl_t), GFP_KERNEL);
if (!vb6801_ctrl) {
rc = -ENOMEM;
goto open_init_fail1;
}
vb6801_ctrl->factor_fps = 1 /** 0x00000400*/ ;
vb6801_ctrl->curr_fps = 7680; /* 30 * Q8 */ ;
vb6801_ctrl->max_fps = 7680; /* 30 * Q8 */ ;
vb6801_ctrl->pict_exp_update = 0; /* 30 * Q8 */ ;
vb6801_ctrl->reducel = 0; /* 30 * Q8 */ ;
vb6801_ctrl->set_test = TEST_OFF;
vb6801_ctrl->prev_res = QTR_SIZE;
vb6801_ctrl->pict_res = FULL_SIZE;
vb6801_ctrl->s_dynamic_params.preview_linesPerFrame =
VB6801_LINES_PER_FRAME_PREVIEW;
vb6801_ctrl->s_dynamic_params.preview_pixelsPerLine =
VB6801_PIXELS_PER_LINE_PREVIEW;
vb6801_ctrl->s_dynamic_params.snapshot_linesPerFrame =
VB6801_LINES_PER_FRAME_SNAPSHOT;
vb6801_ctrl->s_dynamic_params.snapshot_pixelsPerLine =
VB6801_PIXELS_PER_LINE_SNAPSHOT;
if (data)
vb6801_ctrl->sensordata = data;
/* enable mclk first */
msm_camio_clk_rate_set(VB6801_DEFAULT_CLOCK_RATE);
mdelay(20);
rc = vb6801_reset(data);
if (rc < 0)
goto open_init_fail1;
rc = vb6801_i2c_write_table(wreg, ARRAY_SIZE(wreg));
if (rc < 0) {
CDBG("I2C Write Table FAILED!!!\n");
goto open_init_fail2;
}
rc = vb6801_read_nvm_data(&vb6801_ctrl->s_info);
if (rc < 0) {
CDBG("vb6801_read_nvm_data FAILED!!!\n");
goto open_init_fail2;
}
mdelay(66);
rc = vb6801_config_sensor(12, 30, 60, 2, 1, 10, 2, RES_PREVIEW);
if (rc < 0)
goto open_init_fail2;
goto open_init_done;
open_init_fail2:
vb6801_sensor_init_done(data);
open_init_fail1:
kfree(vb6801_ctrl);
open_init_done:
return rc;
}
/*
 * Platform probe: register the I2C driver, verify the sensor responds,
 * and hand the open/release/config hooks back to the MSM framework.
 * The sensor is put back into reset (init_done) until s_init runs.
 *
 * NOTE(review): on failure after a successful i2c_add_driver() the
 * driver is never removed — confirm whether i2c_del_driver() should be
 * called on the error path.
 */
static int vb6801_sensor_probe(const struct msm_camera_sensor_info *info,
struct msm_sensor_ctrl *s)
{
int rc = i2c_add_driver(&vb6801_i2c_driver);
if (rc < 0 || vb6801_client == NULL) {
rc = -ENOTSUPP;
goto probe_done;
}
/* enable mclk first */
msm_camio_clk_rate_set(VB6801_DEFAULT_CLOCK_RATE);
mdelay(20);
rc = vb6801_probe_init_sensor(info);
if (rc < 0)
goto probe_done;
s->s_init = vb6801_sensor_open_init;
s->s_release = vb6801_sensor_release;
s->s_config = vb6801_sensor_config;
s->s_mount_angle = 0;
vb6801_sensor_init_done(info);
probe_done:
return rc;
}
/* platform_driver probe hook: delegate to the msm camera core. */
static int __vb6801_probe(struct platform_device *pdev)
{
	return msm_camera_drv_start(pdev, vb6801_sensor_probe);
}
/* Platform driver matched against the "msm_camera_vb6801" device. */
static struct platform_driver msm_camera_driver = {
	.probe = __vb6801_probe,
	.driver = {
		.name = "msm_camera_vb6801",
		.owner = THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init vb6801_init(void)
{
	return platform_driver_register(&msm_camera_driver);
}
module_init(vb6801_init);
void vb6801_exit(void)
{
i2c_del_driver(&vb6801_i2c_driver);
}
| gpl-2.0 |
ugur2323/WhisperKernelAveaInTouch4 | drivers/hv/hv_util.c | 1252 | 9571 | /*
* Copyright (c) 2010, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
/* Per-service VMBus channel callbacks and their service descriptors. */
static void shutdown_onchannelcallback(void *context);
/* Guest shutdown integration service. */
static struct hv_util_service util_shutdown = {
	.util_cb = shutdown_onchannelcallback,
};

static void timesync_onchannelcallback(void *context);
/* Host/guest time synchronization service. */
static struct hv_util_service util_timesynch = {
	.util_cb = timesync_onchannelcallback,
};

static void heartbeat_onchannelcallback(void *context);
/* Heartbeat ("guest is alive") service. */
static struct hv_util_service util_heartbeat = {
	.util_cb = heartbeat_onchannelcallback,
};

/* Key-Value Pair exchange service; callbacks are declared elsewhere. */
static struct hv_util_service util_kvp = {
	.util_cb = hv_kvp_onchannelcallback,
	.util_init = hv_kvp_init,
	.util_deinit = hv_kvp_deinit,
};

/* Volume Shadow Copy (backup) service; callbacks are declared elsewhere. */
static struct hv_util_service util_vss = {
	.util_cb = hv_vss_onchannelcallback,
	.util_init = hv_vss_init,
	.util_deinit = hv_vss_deinit,
};
/* Work handler: ask userspace for an orderly power-off (forced if the
 * userspace helper fails). */
static void perform_shutdown(struct work_struct *dummy)
{
	orderly_poweroff(true);
}

/*
 * Perform the shutdown operation in a thread context.
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);
/*
 * Channel callback for the shutdown service.  Reads one message from the
 * host: either a version negotiation request (answered in place) or a
 * shutdown request, which is acknowledged and then executed from a work
 * queue so poweroff does not run in channel-callback context.
 */
static void shutdown_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	u8  execute_shutdown = false;
	u8  *shut_txf_buf = util_shutdown.recv_buffer;

	struct shutdown_msg_data *shutdown_msg;

	struct icmsg_hdr *icmsghdrp;
	struct icmsg_negotiate *negop = NULL;

	vmbus_recvpacket(channel, shut_txf_buf,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		/* The ic message header follows the pipe header. */
		icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, negop,
					shut_txf_buf, MAX_SRV_VER, MAX_SRV_VER);
		} else {
			shutdown_msg =
				(struct shutdown_msg_data *)&shut_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

			/* Flags 0 and 1 are the only accepted requests. */
			switch (shutdown_msg->flags) {
			case 0:
			case 1:
				icmsghdrp->status = HV_S_OK;
				execute_shutdown = true;

				pr_info("Shutdown request received -"
					    " graceful shutdown initiated\n");
				break;
			default:
				icmsghdrp->status = HV_E_FAIL;
				execute_shutdown = false;

				pr_info("Shutdown request received -"
					    " Invalid request\n");
				break;
			}
		}

		/* Echo the message back as the transaction response. */
		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, shut_txf_buf,
				       recvlen, requestid,
				       VM_PKT_DATA_INBAND, 0);
	}

	/* Only shut down after the host has been answered. */
	if (execute_shutdown == true)
		schedule_work(&shutdown_work);
}
/*
* Set guest time to host UTC time.
*/
/*
 * Apply the host-provided time (100ns units, Windows epoch) as the
 * guest's wall-clock time.
 */
static inline void do_adj_guesttime(u64 hosttime)
{
	struct timespec ts;
	s64 ns;

	/* hosttime counts 100ns ticks; WLTIMEDELTA rebases the epoch */
	ns = (hosttime - WLTIMEDELTA) * 100;
	ts = ns_to_timespec(ns);
	do_settimeofday(&ts);
}
/*
* Set the host time in a process context.
*/
/* Carries a host timestamp from the channel callback to process context. */
struct adj_time_work {
	struct work_struct work;
	u64	host_time;	/* host wall-clock in 100ns units */
};

/* Work handler: apply the host time, then free the work item. */
static void hv_set_host_time(struct work_struct *work)
{
	struct adj_time_work	*wrk;

	wrk = container_of(work, struct adj_time_work, work);
	do_adj_guesttime(wrk->host_time);
	kfree(wrk);	/* allocated in adj_guesttime() */
}
/*
* Synchronize time with host after reboot, restore, etc.
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
* After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
* message after the timesync channel is opened. Since the hv_utils module is
* loaded after hv_vmbus, the first message is usually missed. The other
* thing is, systime is automatically set to emulated hardware clock which may
* not be UTC time or in the same time zone. So, to override these effects, we
* use the first 50 time samples for initial system time setting.
*/
/*
 * Queue a host time sample for application in process context.
 *
 * The sample is applied when the host explicitly requests a sync
 * (ICTIMESYNCFLAG_SYNC) or while the initial settling window (the first
 * 50 ICTIMESYNCFLAG_SAMPLE messages) is still open; otherwise it is
 * discarded.  See the comment above for why the first samples are used.
 */
static inline void adj_guesttime(u64 hosttime, u8 flags)
{
	static s32 samples_left = 50;
	struct adj_time_work *wrk;
	u8 apply = false;

	wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
	if (wrk == NULL)
		return;

	wrk->host_time = hosttime;
	if (flags & ICTIMESYNCFLAG_SYNC) {
		apply = true;
	} else if ((flags & ICTIMESYNCFLAG_SAMPLE) && samples_left > 0) {
		samples_left--;
		apply = true;
	}

	if (apply) {
		INIT_WORK(&wrk->work, hv_set_host_time);
		schedule_work(&wrk->work);
	} else {
		kfree(wrk);
	}
}
/*
 * Time Sync Channel message handler.
 *
 * Negotiation requests are answered in place; data messages carry the
 * host timestamp and flags, which are forwarded to adj_guesttime(), and
 * the buffer is echoed back as the transaction response.
 */
static void timesync_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct ictimesync_data *timedatap;
	u8 *time_txf_buf = util_timesynch.recv_buffer;

	vmbus_recvpacket(channel, time_txf_buf,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf,
						MAX_SRV_VER, MAX_SRV_VER);
		} else {
			/* Payload follows the pipe and ic headers. */
			timedatap = (struct ictimesync_data *)&time_txf_buf[
				sizeof(struct vmbuspipe_hdr) +
				sizeof(struct icmsg_hdr)];
			adj_guesttime(timedatap->parenttime, timedatap->flags);
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, time_txf_buf,
				recvlen, requestid,
				VM_PKT_DATA_INBAND, 0);
	}
}
/*
 * Heartbeat functionality.
 * Every two seconds, Hyper-V send us a heartbeat request message.
 * we respond to this message, and Hyper-V knows we are alive.
 */
static void heartbeat_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct heartbeat_msg_data *heartbeat_msg;
	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;

	vmbus_recvpacket(channel, hbeat_txf_buf,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, NULL,
				hbeat_txf_buf, MAX_SRV_VER, MAX_SRV_VER);
		} else {
			heartbeat_msg =
				(struct heartbeat_msg_data *)&hbeat_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

			/* Bump the sequence number so the host sees we are alive. */
			heartbeat_msg->seq_num += 1;
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, hbeat_txf_buf,
				       recvlen, requestid,
				       VM_PKT_DATA_INBAND, 0);
	}
}
/*
 * Common probe for every util service.  Allocates the receive buffer,
 * runs the optional service init hook, disables batched reads and opens
 * the channel with the service's callback.  Returns 0 or a negative errno.
 */
static int util_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct hv_util_service *srv =
		(struct hv_util_service *)dev_id->driver_data;
	int ret;

	/* callbacks read at most PAGE_SIZE from this buffer */
	srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!srv->recv_buffer)
		return -ENOMEM;
	if (srv->util_init) {
		ret = srv->util_init(srv);
		if (ret) {
			/* NOTE(review): init's error code is replaced by -ENODEV */
			ret = -ENODEV;
			goto error1;
		}
	}

	/*
	 * The set of services managed by the util driver are not performance
	 * critical and do not need batched reading. Furthermore, some services
	 * such as KVP can only handle one message from the host at a time.
	 * Turn off batched reading for all util drivers before we open the
	 * channel.
	 */

	set_channel_read_state(dev->channel, false);

	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
			srv->util_cb, dev->channel);
	if (ret)
		goto error;

	hv_set_drvdata(dev, srv);
	return 0;

error:
	if (srv->util_deinit)
		srv->util_deinit();
error1:
	kfree(srv->recv_buffer);
	return ret;
}
/* Common remove: close the channel, run service deinit, free the buffer. */
static int util_remove(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);

	/* close first so the callback can no longer touch recv_buffer */
	vmbus_close(dev->channel);
	if (srv->util_deinit)
		srv->util_deinit();
	kfree(srv->recv_buffer);

	return 0;
}
/* VMBus device IDs: map each service GUID to its hv_util_service. */
static const struct hv_vmbus_device_id id_table[] = {
	/* Shutdown guid */
	{ HV_SHUTDOWN_GUID,
	  .driver_data = (unsigned long)&util_shutdown
	},
	/* Time synch guid */
	{ HV_TS_GUID,
	  .driver_data = (unsigned long)&util_timesynch
	},
	/* Heartbeat guid */
	{ HV_HEART_BEAT_GUID,
	  .driver_data = (unsigned long)&util_heartbeat
	},
	/* KVP guid */
	{ HV_KVP_GUID,
	  .driver_data = (unsigned long)&util_kvp
	},
	/* VSS GUID */
	{ HV_VSS_GUID,
	  .driver_data = (unsigned long)&util_vss
	},
	{ },	/* terminator */
};

MODULE_DEVICE_TABLE(vmbus, id_table);
/* The one and only one driver instance, shared by all util services. */
static  struct hv_driver util_drv = {
	.name = "hv_util",
	.id_table = id_table,
	.probe =  util_probe,
	.remove =  util_remove,
};
/* Register the util driver with the VMBus core. */
static int __init init_hyperv_utils(void)
{
	pr_info("Registering HyperV Utility Driver\n");

	return vmbus_driver_register(&util_drv);
}

/* Module teardown; util_remove() runs per bound device.
 * NOTE(review): could be annotated __exit. */
static void exit_hyperv_utils(void)
{
	pr_info("De-Registered HyperV Utility Driver\n");

	vmbus_driver_unregister(&util_drv);
}

module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);

MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |
jgcaap/boeffla | arch/arm/mach-msm/msm_smem_iface.c | 2020 | 1642 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "msm_smem_iface.h"
/**
* msm_smem_get_cpr_info() - Copy Core Power Reduction (CPR) driver specific
* data from Shared memory (SMEM).
* @cpr_info - Pointer to CPR data. Memory to be allocated and freed by
* calling function.
*
* Copy CPR specific data from SMEM to cpr_info.
*/
void msm_smem_get_cpr_info(struct cpr_info_type *cpr_info)
{
	uint32_t entry_size;
	struct boot_info_for_apps *binfo;
	struct cpr_info_type *src;

	/* Locate the shared boot-info record; its absence is fatal. */
	binfo = smem_get_entry(SMEM_BOOT_INFO_FOR_APPS, &entry_size);
	BUG_ON(!binfo);

	/* A short entry means the SMEM layout disagrees with our struct. */
	if (entry_size < sizeof(struct boot_info_for_apps)) {
		pr_err("%s: Shared boot info data structure too small!\n",
		       __func__);
		BUG();
	} else {
		pr_debug("%s: Shared boot info available.\n", __func__);
	}

	/* Copy out just the CPR fields the caller cares about. */
	src = (struct cpr_info_type *)&(binfo->cpr_info);
	cpr_info->ring_osc = src->ring_osc;
	cpr_info->turbo_quot = src->turbo_quot;
	cpr_info->pvs_fuse = src->pvs_fuse;
	cpr_info->floor_fuse = src->floor_fuse;
	cpr_info->disable_cpr = src->disable_cpr;
}
| gpl-2.0 |
viaembedded/springboard-kernel-bsp | drivers/watchdog/it8712f_wdt.c | 2532 | 9941 | /*
* IT8712F "Smart Guardian" Watchdog support
*
* Copyright (c) 2006-2007 Jorge Boncompte - DTI2 <jorge@dti2.net>
*
* Based on info and code taken from:
*
* drivers/char/watchdog/scx200_wdt.c
* drivers/hwmon/it87.c
* IT8712F EC-LPC I/O Preliminary Specification 0.8.2
* IT8712F EC-LPC I/O Preliminary Specification 0.9.3
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* The author(s) of this software shall not be held liable for damages
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#define NAME "it8712f_wdt"
MODULE_AUTHOR("Jorge Boncompte - DTI2 <jorge@dti2.net>");
MODULE_DESCRIPTION("IT8712F Watchdog Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
static int max_units = 255;
static int margin = 60; /* in seconds */
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
static unsigned long wdt_open;
static unsigned expect_close;
static spinlock_t io_lock;
static unsigned char revision;
/* Dog Food address - We use the game port address */
static unsigned short address;
#define REG 0x2e /* The register to read/write */
#define VAL 0x2f /* The value to read/write */
#define LDN 0x07 /* Register: Logical device select */
#define DEVID 0x20 /* Register: Device ID */
#define DEVREV 0x22 /* Register: Device Revision */
#define ACT_REG 0x30 /* LDN Register: Activation */
#define BASE_REG 0x60 /* LDN Register: Base address */
#define IT8712F_DEVID 0x8712
#define LDN_GPIO 0x07 /* GPIO and Watch Dog Timer */
#define LDN_GAME 0x09 /* Game Port */
#define WDT_CONTROL 0x71 /* WDT Register: Control */
#define WDT_CONFIG 0x72 /* WDT Register: Configuration */
#define WDT_TIMEOUT 0x73 /* WDT Register: Timeout Value */
#define WDT_RESET_GAME 0x10 /* Reset timer on read or write to game port */
#define WDT_RESET_KBD 0x20 /* Reset timer on keyboard interrupt */
#define WDT_RESET_MOUSE 0x40 /* Reset timer on mouse interrupt */
#define WDT_RESET_CIR 0x80 /* Reset timer on consumer IR interrupt */
#define WDT_UNIT_SEC 0x80 /* If 0 in MINUTES */
#define WDT_OUT_PWROK 0x10 /* Pulse PWROK on timeout */
#define WDT_OUT_KRST 0x40 /* Pulse reset on timeout */
static int wdt_control_reg = WDT_RESET_GAME;
module_param(wdt_control_reg, int, 0);
MODULE_PARM_DESC(wdt_control_reg, "Value to write to watchdog control "
"register. The default WDT_RESET_GAME resets the timer on "
"game port reads that this driver generates. You can also "
"use KBD, MOUSE or CIR if you have some external way to "
"generate those interrupts.");
/* Read one byte from a SuperIO configuration register. */
static int superio_inb(int reg)
{
	outb(reg, REG);
	return inb(VAL);
}

/* Write one byte to a SuperIO configuration register. */
static void superio_outb(int val, int reg)
{
	outb(reg, REG);
	outb(val, VAL);
}

/* Read a big-endian 16-bit value from two consecutive config registers. */
static int superio_inw(int reg)
{
	int val;
	outb(reg++, REG);
	val = inb(VAL) << 8;
	outb(reg, REG);
	val |= inb(VAL);
	return val;
}

/* Select which logical device number (LDN) subsequent accesses address. */
static inline void superio_select(int ldn)
{
	outb(LDN, REG);
	outb(ldn, VAL);
}
/*
 * Enter SuperIO configuration mode (magic unlock byte sequence) and take
 * io_lock; every caller must pair this with superio_exit().
 */
static inline void superio_enter(void)
{
	spin_lock(&io_lock);
	outb(0x87, REG);
	outb(0x01, REG);
	outb(0x55, REG);
	outb(0x55, REG);
}

/* Leave configuration mode and release io_lock. */
static inline void superio_exit(void)
{
	outb(0x02, REG);
	outb(0x02, VAL);
	spin_unlock(&io_lock);
}
/* Pat the watchdog: a read of the game port resets the timer. */
static inline void it8712f_wdt_ping(void)
{
	if (wdt_control_reg & WDT_RESET_GAME)
		inb(address);
}

/*
 * Program the timeout registers from "margin".  Seconds precision is
 * used while the value fits in the timeout register (max_units);
 * otherwise it falls back to minutes.  Revisions >= 0x08 have a 16-bit
 * timeout register.  Caller must have entered config mode and selected
 * LDN_GPIO.
 */
static void it8712f_wdt_update_margin(void)
{
	int config = WDT_OUT_KRST | WDT_OUT_PWROK;
	int units = margin;

	/* Switch to minutes precision if the configured margin
	 * value does not fit within the register width.
	 */
	if (units <= max_units) {
		config |= WDT_UNIT_SEC; /* else UNIT is MINUTES */
		printk(KERN_INFO NAME ": timer margin %d seconds\n", units);
	} else {
		units /= 60;
		printk(KERN_INFO NAME ": timer margin %d minutes\n", units);
	}
	superio_outb(config, WDT_CONFIG);

	if (revision >= 0x08)
		superio_outb(units >> 8, WDT_TIMEOUT + 1);
	superio_outb(units, WDT_TIMEOUT);
}
/* Report WDIOF_CARDRESET when bit 0 of the WDT control register is set.
 * Caller must be in config mode with LDN_GPIO selected. */
static int it8712f_wdt_get_status(void)
{
	return (superio_inb(WDT_CONTROL) & 0x01) ? WDIOF_CARDRESET : 0;
}
/* Arm the watchdog: program the reset source and the current margin. */
static void it8712f_wdt_enable(void)
{
	printk(KERN_DEBUG NAME ": enabling watchdog timer\n");
	superio_enter();
	superio_select(LDN_GPIO);

	superio_outb(wdt_control_reg, WDT_CONTROL);

	it8712f_wdt_update_margin();

	superio_exit();

	it8712f_wdt_ping();
}

/* Disarm the watchdog: zero the config, control and timeout registers. */
static void it8712f_wdt_disable(void)
{
	printk(KERN_DEBUG NAME ": disabling watchdog timer\n");

	superio_enter();
	superio_select(LDN_GPIO);

	superio_outb(0, WDT_CONFIG);
	superio_outb(0, WDT_CONTROL);
	if (revision >= 0x08)
		superio_outb(0, WDT_TIMEOUT + 1);
	superio_outb(0, WDT_TIMEOUT);

	superio_exit();
}
/* Reboot notifier: stop the dog on halt/power-off unless nowayout is set. */
static int it8712f_wdt_notify(struct notifier_block *this,
		    unsigned long code, void *unused)
{
	int halting = (code == SYS_HALT || code == SYS_POWER_OFF);

	if (halting && !nowayout)
		it8712f_wdt_disable();

	return NOTIFY_DONE;
}
/* Hooked into the reboot chain so a clean shutdown is not reset. */
static struct notifier_block it8712f_wdt_notifier = {
	.notifier_call = it8712f_wdt_notify,
};
/*
 * Any write pats the dog; the data is scanned for the magic close
 * character 'V', which arms the "clean close" path in release.
 */
static ssize_t it8712f_wdt_write(struct file *file, const char __user *data,
	size_t len, loff_t *ppos)
{
	size_t i;

	if (!len)
		return 0;

	it8712f_wdt_ping();

	expect_close = 0;
	for (i = 0; i < len; ++i) {
		char c;

		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			expect_close = 42;
	}

	return len;
}
/*
 * Standard watchdog character-device ioctls: capability query, status,
 * keepalive, and get/set timeout (SETTIMEOUT reprograms the hardware and
 * falls through to report the accepted value).
 */
static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.identity = "IT8712F Watchdog",
		.firmware_version = 1,
		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
				WDIOF_MAGICCLOSE,
	};
	int value;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &ident, sizeof(ident)))
			return -EFAULT;
		return 0;
	case WDIOC_GETSTATUS:
		superio_enter();
		superio_select(LDN_GPIO);

		value = it8712f_wdt_get_status();

		superio_exit();

		return put_user(value, p);
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		it8712f_wdt_ping();
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(value, p))
			return -EFAULT;
		/* bounds: 1 second up to the hardware maximum in seconds */
		if (value < 1)
			return -EINVAL;
		if (value > (max_units * 60))
			return -EINVAL;
		margin = value;
		superio_enter();
		superio_select(LDN_GPIO);

		it8712f_wdt_update_margin();

		superio_exit();

		it8712f_wdt_ping();
		/* Fall through */
	case WDIOC_GETTIMEOUT:
		if (put_user(margin, p))
			return -EFAULT;
		return 0;
	default:
		return -ENOTTY;
	}
}
/* /dev/watchdog open: enforce single-open, then start the timer. */
static int it8712f_wdt_open(struct inode *inode, struct file *file)
{
	/* only allow one at a time */
	if (test_and_set_bit(0, &wdt_open))
		return -EBUSY;
	it8712f_wdt_enable();

	return nonseekable_open(inode, file);
}

/*
 * Release: disable the dog only on a magic close (expect_close == 42,
 * set by writing 'V') when nowayout is unset; otherwise keep it running
 * so an unexpected close still leads to a reset.
 */
static int it8712f_wdt_release(struct inode *inode, struct file *file)
{
	if (expect_close != 42) {
		printk(KERN_WARNING NAME
			": watchdog device closed unexpectedly, will not"
			" disable the watchdog timer\n");
	} else if (!nowayout) {
		it8712f_wdt_disable();
	}
	expect_close = 0;
	clear_bit(0, &wdt_open);

	return 0;
}
/* Character-device operations behind /dev/watchdog. */
static const struct file_operations it8712f_wdt_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.write = it8712f_wdt_write,
	.unlocked_ioctl = it8712f_wdt_ioctl,
	.open = it8712f_wdt_open,
	.release = it8712f_wdt_release,
};

/* Registered on the standard watchdog misc minor. */
static struct miscdevice it8712f_wdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &it8712f_wdt_fops,
};
/*
 * Detect the IT8712F, read its revision, and return the game-port base
 * address (used as the "dog food" port) in *address.  Also clamps the
 * module-parameter margin to the hardware range.  Returns 0 on success,
 * -ENODEV if no usable chip is found.
 */
static int __init it8712f_wdt_find(unsigned short *address)
{
	int err = -ENODEV;
	int chip_type;

	superio_enter();
	chip_type = superio_inw(DEVID);
	if (chip_type != IT8712F_DEVID)
		goto exit;

	superio_select(LDN_GAME);
	superio_outb(1, ACT_REG);
	if (!(superio_inb(ACT_REG) & 0x01)) {
		printk(KERN_ERR NAME ": Device not activated, skipping\n");
		goto exit;
	}

	*address = superio_inw(BASE_REG);
	if (*address == 0) {
		printk(KERN_ERR NAME ": Base address not set, skipping\n");
		goto exit;
	}

	err = 0;
	revision = superio_inb(DEVREV) & 0x0f;

	/* Later revisions have 16-bit values per datasheet 0.9.1 */
	if (revision >= 0x08)
		max_units = 65535;

	/* clamp an out-of-range module-parameter margin */
	if (margin > (max_units * 60))
		margin = (max_units * 60);

	printk(KERN_INFO NAME ": Found IT%04xF chip revision %d - "
		"using DogFood address 0x%x\n",
		chip_type, revision, *address);

exit:
	superio_exit();
	return err;
}
/*
 * Module init: locate the chip, reserve its dog-food I/O port, start
 * with the dog disabled, then hook the reboot notifier and register the
 * misc device.  Failures unwind via gotos in reverse order.
 */
static int __init it8712f_wdt_init(void)
{
	int err = 0;

	spin_lock_init(&io_lock);

	if (it8712f_wdt_find(&address))
		return -ENODEV;

	if (!request_region(address, 1, "IT8712F Watchdog")) {
		printk(KERN_WARNING NAME ": watchdog I/O region busy\n");
		return -EBUSY;
	}

	it8712f_wdt_disable();

	err = register_reboot_notifier(&it8712f_wdt_notifier);
	if (err) {
		printk(KERN_ERR NAME ": unable to register reboot notifier\n");
		goto out;
	}

	err = misc_register(&it8712f_wdt_miscdev);
	if (err) {
		printk(KERN_ERR NAME
			": cannot register miscdev on minor=%d (err=%d)\n",
				WATCHDOG_MINOR, err);
		goto reboot_out;
	}

	return 0;

reboot_out:
	unregister_reboot_notifier(&it8712f_wdt_notifier);
out:
	release_region(address, 1);
	return err;
}
/* Module teardown: unwind init in reverse order. */
static void __exit it8712f_wdt_exit(void)
{
	misc_deregister(&it8712f_wdt_miscdev);
	unregister_reboot_notifier(&it8712f_wdt_notifier);
	release_region(address, 1);
}

module_init(it8712f_wdt_init);
module_exit(it8712f_wdt_exit);
| gpl-2.0 |
zarboz/Ville-Z | tools/perf/util/ui/browsers/annotate.c | 2788 | 7340 | #include "../browser.h"
#include "../helpline.h"
#include "../libslang.h"
#include "../../annotate.h"
#include "../../hist.h"
#include "../../sort.h"
#include "../../symbol.h"
#include <pthread.h>
/* Pop up a modal newt message box with a printf-style error message. */
static void ui__error_window(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
	va_end(ap);
}
struct annotate_browser {
	struct ui_browser b;
	struct rb_root	  entries;	/* lines ordered by sample percent */
	struct rb_node	  *curr_hot;	/* hottest line, or NULL */
};

/* Per-line rb-tree payload, allocated right behind each objdump_line. */
struct objdump_line_rb_node {
	struct rb_node	rb_node;
	double		percent;	/* share of samples hitting this line */
	u32		idx;		/* index of the line in the listing */
};

/*
 * The payload lives immediately after the objdump_line it annotates
 * (symbol__annotate() is asked to allocate the extra bytes), hence
 * "self + 1".
 */
static inline
struct objdump_line_rb_node *objdump_line__rb(struct objdump_line *self)
{
	return (struct objdump_line_rb_node *)(self + 1);
}
/*
 * ui_browser write callback: render one objdump line at @row — a percent
 * column (blank for lines without an instruction offset), a ':' separator,
 * then the disassembly text.
 */
static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
{
	struct objdump_line *ol = rb_entry(entry, struct objdump_line, node);
	bool current_entry = ui_browser__is_current_entry(self, row);
	int width = self->width;

	if (ol->offset != -1) {
		struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
		ui_browser__set_percent_color(self, olrb->percent, current_entry);
		slsmg_printf(" %7.2f ", olrb->percent);
	} else {
		ui_browser__set_percent_color(self, 0, current_entry);
		slsmg_write_nstring(" ", 9);
	}

	SLsmg_write_char(':');
	slsmg_write_nstring(" ", 8);

	/* 18 columns already spent: 9 percent + ':' + 8 padding */
	if (!*ol->line)
		slsmg_write_nstring(" ", width - 18);
	else
		slsmg_write_nstring(ol->line, width - 18);

	if (!current_entry)
		ui_browser__set_color(self, HE_COLORSET_CODE);
}
/*
 * Compute the percentage of event @evidx samples attributable to @self:
 * sum over all addresses from this line's offset up to (but excluding)
 * the next line that has an instruction offset.
 */
static double objdump_line__calc_percent(struct objdump_line *self,
					 struct symbol *sym, int evidx)
{
	double percent = 0.0;

	if (self->offset != -1) {
		int len = sym->end - sym->start;
		unsigned int hits = 0;
		struct annotation *notes = symbol__annotation(sym);
		struct source_line *src_line = notes->src->lines;
		struct sym_hist *h = annotation__histogram(notes, evidx);
		s64 offset = self->offset;
		struct objdump_line *next;

		next = objdump__get_next_ip_line(&notes->src->source, self);
		while (offset < (s64)len &&
		       (next == NULL || offset < next->offset)) {
			if (src_line) {
				/* per-source-line percentages precomputed */
				percent += src_line[offset].percent;
			} else
				hits += h->addr[offset];

			++offset;
		}
		/*
		 * If the percentage wasn't already calculated in
		 * symbol__get_source_line, do it now:
		 */
		if (src_line == NULL && h->sum)
			percent = 100.0 * hits / h->sum;
	}

	return percent;
}
/* Insert @line into the hotness tree, ordered by ascending percent. */
static void objdump__insert_line(struct rb_root *self,
				 struct objdump_line_rb_node *line)
{
	struct rb_node **link = &self->rb_node;
	struct rb_node *parent = NULL;

	/* descend to the leaf slot that keeps the tree ordered */
	while (*link != NULL) {
		struct objdump_line_rb_node *cur;

		parent = *link;
		cur = rb_entry(parent, struct objdump_line_rb_node, rb_node);
		if (line->percent < cur->percent)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&line->rb_node, parent, link);
	rb_insert_color(&line->rb_node, self);
}
/*
 * Make @nd the current entry and scroll so its line sits roughly in the
 * middle of the browser window.
 */
static void annotate_browser__set_top(struct annotate_browser *self,
				      struct rb_node *nd)
{
	struct objdump_line_rb_node *rbpos;
	struct objdump_line *pos;
	unsigned back;

	ui_browser__refresh_dimensions(&self->b);
	back = self->b.height / 2;
	rbpos = rb_entry(nd, struct objdump_line_rb_node, rb_node);
	/* the rb payload sits right after its objdump_line: step back one */
	pos = ((struct objdump_line *)rbpos) - 1;
	self->b.top_idx = self->b.index = rbpos->idx;

	/* walk back up to half a screen so the hot line is centered */
	while (self->b.top_idx != 0 && back != 0) {
		pos = list_entry(pos->node.prev, struct objdump_line, node);

		--self->b.top_idx;
		--back;
	}

	self->b.top = pos;
	self->curr_hot = nd;
}
/*
 * (Re)build the hotness rb-tree: recompute every line's percent and
 * insert those with >= 0.01% into browser->entries; curr_hot becomes
 * the hottest (last) node.
 */
static void annotate_browser__calc_percent(struct annotate_browser *browser,
					   int evidx)
{
	struct symbol *sym = browser->b.priv;
	struct annotation *notes = symbol__annotation(sym);
	struct objdump_line *pos;

	browser->entries = RB_ROOT;

	/* serialize against concurrent updates of the annotation data */
	pthread_mutex_lock(&notes->lock);

	list_for_each_entry(pos, &notes->src->source, node) {
		struct objdump_line_rb_node *rbpos = objdump_line__rb(pos);
		rbpos->percent = objdump_line__calc_percent(pos, sym, evidx);
		if (rbpos->percent < 0.01) {
			RB_CLEAR_NODE(&rbpos->rb_node);
			continue;
		}
		objdump__insert_line(&browser->entries, rbpos);
	}
	pthread_mutex_unlock(&notes->lock);

	browser->curr_hot = rb_last(&browser->entries);
}
/*
 * Interactive loop of the annotation browser.  TAB/shift-TAB cycle
 * through the hottest lines, 'H' jumps to the hottest one; with a
 * non-zero @refresh the histogram is decayed and recomputed on a timer.
 * Returns the key that ended the loop, or -1 if the browser could not
 * be shown.
 */
static int annotate_browser__run(struct annotate_browser *self, int evidx,
				 int refresh)
{
	struct rb_node *nd = NULL;
	struct symbol *sym = self->b.priv;
	/*
	 * RIGHT To allow builtin-annotate to cycle thru multiple symbols by
	 * examining the exit key for this function.
	 */
	int exit_keys[] = { 'H', NEWT_KEY_TAB, NEWT_KEY_UNTAB,
			    NEWT_KEY_RIGHT, 0 };
	int key;

	if (ui_browser__show(&self->b, sym->name,
			     "<-, -> or ESC: exit, TAB/shift+TAB: "
			     "cycle hottest lines, H: Hottest") < 0)
		return -1;

	ui_browser__add_exit_keys(&self->b, exit_keys);
	annotate_browser__calc_percent(self, evidx);

	if (self->curr_hot)
		annotate_browser__set_top(self, self->curr_hot);

	nd = self->curr_hot;

	if (refresh != 0)
		newtFormSetTimer(self->b.form, refresh);

	while (1) {
		key = ui_browser__run(&self->b);

		if (refresh != 0) {
			annotate_browser__calc_percent(self, evidx);
			/*
			 * Current line focus got out of the list of most active
			 * lines, NULL it so that if TAB|UNTAB is pressed, we
			 * move to curr_hot (current hottest line).
			 */
			if (nd != NULL && RB_EMPTY_NODE(nd))
				nd = NULL;
		}

		switch (key) {
		case -1:
			/*
			 * FIXME we need to check if it was
			 * es.reason == NEWT_EXIT_TIMER
			 */
			if (refresh != 0)
				symbol__annotate_decay_histogram(sym, evidx);
			continue;
		case NEWT_KEY_TAB:
			if (nd != NULL) {
				nd = rb_prev(nd);
				if (nd == NULL)
					nd = rb_last(&self->entries);
			} else
				nd = self->curr_hot;
			break;
		case NEWT_KEY_UNTAB:
			/*
			 * BUGFIX: braces were missing here, so the dangling
			 * "else" bound to the inner NULL check and reset nd
			 * to curr_hot whenever rb_next() succeeded — cycling
			 * forward never advanced.  Mirror the TAB case.
			 */
			if (nd != NULL) {
				nd = rb_next(nd);
				if (nd == NULL)
					nd = rb_first(&self->entries);
			} else
				nd = self->curr_hot;
			break;
		case 'H':
			nd = self->curr_hot;
			break;
		default:
			goto out;
		}

		if (nd != NULL)
			annotate_browser__set_top(self, nd);
	}
out:
	ui_browser__hide(&self->b);
	return key;
}
/* Annotate the symbol of a histogram entry (no timer-driven refresh). */
int hist_entry__tui_annotate(struct hist_entry *he, int evidx)
{
	return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 0);
}
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
int refresh)
{
struct objdump_line *pos, *n;
struct annotation *notes;
struct annotate_browser browser = {
.b = {
.refresh = ui_browser__list_head_refresh,
.seek = ui_browser__list_head_seek,
.write = annotate_browser__write,
.priv = sym,
},
};
int ret;
if (sym == NULL)
return -1;
if (map->dso->annotate_warned)
return -1;
if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
ui__error_window(ui_helpline__last_msg);
return -1;
}
ui_helpline__push("Press <- or ESC to exit");
notes = symbol__annotation(sym);
list_for_each_entry(pos, ¬es->src->source, node) {
struct objdump_line_rb_node *rbpos;
size_t line_len = strlen(pos->line);
if (browser.b.width < line_len)
browser.b.width = line_len;
rbpos = objdump_line__rb(pos);
rbpos->idx = browser.b.nr_entries++;
}
browser.b.entries = ¬es->src->source,
browser.b.width += 18; /* Percentage */
ret = annotate_browser__run(&browser, evidx, refresh);
list_for_each_entry_safe(pos, n, ¬es->src->source, node) {
list_del(&pos->node);
objdump_line__free(pos);
}
return ret;
}
| gpl-2.0 |
AD5GB/android_kernel_google_msm | drivers/gpu/drm/gma500/cdv_intel_display.c | 4068 | 40472 | /*
* Copyright © 2006-2011 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_display.h"
#include "power.h"
#include "cdv_device.h"
/* Inclusive min/max range for one PLL parameter. */
struct cdv_intel_range_t {
	int min, max;
};

/* Post-divider pair; presumably p2_slow applies below dot_limit and
 * p2_fast above — confirm against the clock-finding code. */
struct cdv_intel_p2_t {
	int dot_limit;
	int p2_slow, p2_fast;
};

/* One candidate/chosen DPLL configuration. */
struct cdv_intel_clock_t {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
};

#define INTEL_P2_NUM		      2

/* Complete set of parameter limits for one output/refclk combination. */
struct cdv_intel_limit_t {
	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
	struct cdv_intel_p2_t p2;
};

/* Indexes into cdv_intel_limits[]. */
#define CDV_LIMIT_SINGLE_LVDS_96	0
#define CDV_LIMIT_SINGLE_LVDS_100	1
#define CDV_LIMIT_DAC_HDMI_27		2
#define CDV_LIMIT_DAC_HDMI_96		3
/* PLL parameter limits, indexed by the CDV_LIMIT_* constants above. */
static const struct cdv_intel_limit_t cdv_intel_limits[] = {
	{			/* CDV_SIGNLE_LVDS_96MHz */
	 .dot = {.min = 20000, .max = 115500},
	 .vco = {.min = 1800000, .max = 3600000},
	 .n = {.min = 2, .max = 6},
	 .m = {.min = 60, .max = 160},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 58, .max = 158},
	 .p = {.min = 28, .max = 140},
	 .p1 = {.min = 2, .max = 10},
	 .p2 = {.dot_limit = 200000,
		.p2_slow = 14, .p2_fast = 14},
	 },
	{			/* CDV_SINGLE_LVDS_100MHz */
	 .dot = {.min = 20000, .max = 115500},
	 .vco = {.min = 1800000, .max = 3600000},
	 .n = {.min = 2, .max = 6},
	 .m = {.min = 60, .max = 160},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 58, .max = 158},
	 .p = {.min = 28, .max = 140},
	 .p1 = {.min = 2, .max = 10},
	 /* The single-channel range is 25-112Mhz, and dual-channel
	  * is 80-224Mhz. Prefer single channel as much as possible.
	  */
	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
	 },
	{			/* CDV_DAC_HDMI_27MHz */
	 .dot = {.min = 20000, .max = 400000},
	 .vco = {.min = 1809000, .max = 3564000},
	 .n = {.min = 1, .max = 1},
	 .m = {.min = 67, .max = 132},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 65, .max = 130},
	 .p = {.min = 5, .max = 90},
	 .p1 = {.min = 1, .max = 9},
	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
	 },
	{			/* CDV_DAC_HDMI_96MHz */
	 .dot = {.min = 20000, .max = 400000},
	 .vco = {.min = 1800000, .max = 3600000},
	 .n = {.min = 2, .max = 6},
	 .m = {.min = 60, .max = 160},
	 .m1 = {.min = 0, .max = 0},
	 .m2 = {.min = 58, .max = 158},
	 .p = {.min = 5, .max = 100},
	 .p1 = {.min = 1, .max = 10},
	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
	 },
};
/*
 * Poll COND for up to MS milliseconds, sleeping W ms between polls
 * (no sleep while the kernel debugger is active).  Evaluates to 0 on
 * success or -ETIMEDOUT.  COND is re-evaluated every pass, so avoid
 * side effects in the argument.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		if (W && !in_dbg_master())				\
			msleep(W);					\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
/*
 * Read one 32-bit value from the DPLL sideband bus: wait for the
 * controller to go idle, post a read packet for @reg, wait for
 * completion, then fetch the data into *val.  Returns 0 or -ETIMEDOUT.
 */
static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
{
	int ret;

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle before read\n");
		return ret;
	}

	REG_WRITE(SB_ADDR, reg);
	REG_WRITE(SB_PCKT,
		   SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
		   SET_FIELD(0xf, SB_BYTE_ENABLE));

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle after read\n");
		return ret;
	}

	*val = REG_READ(SB_DATA);

	return 0;
}
/*
 * Write one 32-bit value to a DPLL register over the sideband bus.
 * With dpio_debug enabled, the register is read back before and after
 * the write and both values are logged.  Returns 0 or -ETIMEDOUT.
 */
static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
{
	int ret;
	static bool dpio_debug = true;
	u32 temp;

	if (dpio_debug) {
		if (cdv_sb_read(dev, reg, &temp) == 0)
			DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
		DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
	}

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle before write\n");
		return ret;
	}

	REG_WRITE(SB_ADDR, reg);
	REG_WRITE(SB_DATA, val);
	REG_WRITE(SB_PCKT,
		   SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
		   SET_FIELD(0xf, SB_BYTE_ENABLE));

	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
	if (ret) {
		DRM_ERROR("timeout waiting for SB to idle after write\n");
		return ret;
	}

	if (dpio_debug) {
		if (cdv_sb_read(dev, reg, &temp) == 0)
			DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
	}

	return 0;
}
/* Reset the DPIO configuration register. The BIOS does this at every
 * mode set. Clears DPIO_CFG, flushes the write with a posting read,
 * then re-enables the common block with mode select 0.
 */
static void cdv_sb_reset(struct drm_device *dev)
{

	REG_WRITE(DPIO_CFG, 0);
	REG_READ(DPIO_CFG);	/* posting read to flush the clear */
	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
}
/* Unlike most Intel display engines, on Cedarview the DPLL registers
 * are behind this sideband bus. They must be programmed while the
 * DPLL reference clock is on in the DPLL control register, but before
 * the DPLL is enabled in the DPLL control register.
 *
 * Programs the M/N/P dividers and VCO band select for the given pipe's
 * DPLL from @clock, then (for pipe A only) enables the PLL on all four
 * HDMI lanes. Returns 0 on success, -EBUSY if the reference clock is
 * off, or a negative errno propagated from the sideband accessors.
 */
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
			struct cdv_intel_clock_t *clock)
{
	struct psb_intel_crtc *psb_crtc =
				to_psb_intel_crtc(crtc);
	int pipe = psb_crtc->pipe;
	u32 m, n_vco, p;
	int ret = 0;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	u32 ref_value;

	cdv_sb_reset(dev);

	/* The sideband registers are only writable with the refclk running. */
	if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
		DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
		return -EBUSY;
	}

	/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
	ref_value = 0x68A701;

	cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);

	/* We don't know what the other fields of these regs are, so
	 * leave them in place.
	 */
	ret = cdv_sb_read(dev, SB_M(pipe), &m);
	if (ret)
		return ret;
	m &= ~SB_M_DIVIDER_MASK;
	m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
	ret = cdv_sb_write(dev, SB_M(pipe), m);
	if (ret)
		return ret;

	ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
	if (ret)
		return ret;

	/* Follow the BIOS to program the N_DIVIDER REG */
	n_vco &= 0xFFFF;
	n_vco |= 0x107;
	n_vco &= ~(SB_N_VCO_SEL_MASK |
		   SB_N_DIVIDER_MASK |
		   SB_N_CB_TUNE_MASK);

	n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);

	/* VCO band select and charge-pump tuning depend on the target
	 * VCO frequency; these thresholds follow the BIOS tables. */
	if (clock->vco < 2250000) {
		n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
	} else if (clock->vco < 2750000) {
		n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
	} else if (clock->vco < 3300000) {
		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
	} else {
		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
		n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
	}

	ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
	if (ret)
		return ret;

	/* Program the P1 and (encoded) P2 post dividers. */
	ret = cdv_sb_read(dev, SB_P(pipe), &p);
	if (ret)
		return ret;
	p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
	p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
	switch (clock->p2) {
	case 5:
		p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
		break;
	case 10:
		p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
		break;
	case 14:
		p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
		break;
	case 7:
		p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
		break;
	default:
		DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
		return -EINVAL;
	}
	ret = cdv_sb_write(dev, SB_P(pipe), p);
	if (ret)
		return ret;

	/* Always program the lane registers for pipe A: lanes 0/1 feed
	 * HDMI B and lanes 2/3 feed HDMI C. The accesses are best-effort
	 * (status deliberately ignored), matching the original sequence. */
	if (pipe == 0) {
		static const u32 lane_regs[] = {
			PSB_LANE0, PSB_LANE1,	/* HDMI B */
			PSB_LANE2, PSB_LANE3	/* HDMI C */
		};
		u32 lane_value;
		int i;

		for (i = 0; i < ARRAY_SIZE(lane_regs); i++) {
			cdv_sb_read(dev, lane_regs[i], &lane_value);
			lane_value &= ~(LANE_PLL_MASK);
			lane_value |= LANE_PLL_ENABLE;
			cdv_sb_write(dev, lane_regs[i], lane_value);
		}
	}

	return 0;
}
/*
 * cdv_intel_pipe_has_type - test whether an encoder of @type drives @crtc.
 *
 * Walks the device's connector list and, for every connector currently
 * routed through this CRTC, compares the attached encoder's type against
 * the one requested. Returns true on the first match, false otherwise.
 */
static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct psb_intel_encoder *encoder;

		/* Skip connectors not driven by this CRTC. */
		if (!connector->encoder || connector->encoder->crtc != crtc)
			continue;

		encoder = psb_intel_attached_encoder(connector);
		if (encoder->type == type)
			return true;
	}

	return false;
}
/*
 * cdv_intel_limit - pick the PLL limit table for this CRTC's output/refclk.
 *
 * LVDS outputs get the single-channel LVDS tables (dual-channel LVDS is
 * not supported on CDV); everything else gets the DAC/HDMI tables. The
 * table variant is selected by the reference clock (96 MHz vs 100 MHz for
 * LVDS, 27 MHz vs 96 MHz otherwise).
 */
static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
							int refclk)
{
	if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * Now only single-channel LVDS is supported on CDV. If it is
		 * incorrect, please add the dual-channel LVDS.
		 */
		return (refclk == 96000) ?
			&cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96] :
			&cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
	}

	return (refclk == 27000) ?
		&cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27] :
		&cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
}
/* m1 is reserved as 0 in CDV, n is a ring counter.
 * Fill in the derived fields (m, p, vco, dot) of @clock from its
 * divider fields and the reference clock in kHz.
 */
static void cdv_intel_clock(struct drm_device *dev,
			int refclk, struct cdv_intel_clock_t *clock)
{
	int m = clock->m2 + 2;		/* effective feedback divider */
	int p = clock->p1 * clock->p2;	/* combined post divider */

	clock->m = m;
	clock->p = p;
	clock->vco = (refclk * m) / clock->n;
	clock->dot = clock->vco / p;
}
/* NOTE: this macro performs a hidden "return false" from the caller. */
#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }

/*
 * cdv_intel_PLL_is_valid - range-check a candidate PLL configuration.
 *
 * Validates p1, the combined post divider p, the VCO frequency and the
 * resulting dot clock against @limit. Returns true when everything is in
 * range; returns false (via INTELPllInvalid) on the first violation.
 */
static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
				const struct cdv_intel_limit_t *limit,
			       struct cdv_intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	/* unnecessary to check the range of m(m1/M2)/n again */
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
int refclk,
struct cdv_intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct cdv_intel_clock_t clock;
const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
int err = target;
if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
(REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
/*
* For LVDS, if the panel is on, just rely on its current
* settings for dual-channel. We haven't figured out how to
* reliably set up different single/dual channel state, if we
* even can.
*/
if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
memset(best_clock, 0, sizeof(*best_clock));
clock.m1 = 0;
/* m1 is reserved as 0 in CDV, n is a ring counter.
So skip the m1 loop */
for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
clock.m2++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max;
clock.p1++) {
int this_err;
cdv_intel_clock(dev, refclk, &clock);
if (!cdv_intel_PLL_is_valid(crtc,
limit, &clock))
continue;
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
}
return err != target;
}
/*
 * cdv_intel_pipe_set_base - point the display plane at the bound fb.
 * @crtc: CRTC whose plane is being reprogrammed
 * @x, @y: panning offset into the framebuffer
 * @old_fb: previously scanned-out framebuffer (unpinned on success)
 *
 * Pins the current framebuffer into the GTT, programs stride, pixel
 * format, base offset and surface address, and unpins @old_fb. Runs under
 * a gma_power_begin/end bracket so the registers are accessible.
 *
 * Returns 0 on success or a negative errno.
 */
static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
			int x, int y, struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
	int pipe = psb_intel_crtc->pipe;
	unsigned long start, offset;
	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!crtc->fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto psb_intel_pipe_cleaner;
	}

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(psbfb->gtt);
	if (ret < 0)
		goto psb_intel_pipe_set_base_exit;
	start = psbfb->gtt->offset;
	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);

	REG_WRITE(dspstride, crtc->fb->pitches[0]);

	dspcntr = REG_READ(dspcntr_reg);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (crtc->fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (crtc->fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		/* Undo the pin taken above -- this error path previously
		 * leaked the GTT mapping of the new framebuffer. */
		psb_gtt_unpin(psbfb->gtt);
		goto psb_intel_pipe_set_base_exit;
	}
	REG_WRITE(dspcntr_reg, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* Base offset first, then the surface address; the posting reads
	 * flush each write before the next. */
	REG_WRITE(dspbase, offset);
	REG_READ(dspbase);
	REG_WRITE(dspsurf, start);
	REG_READ(dspsurf);

psb_intel_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);

psb_intel_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}
/**
 * Sets the power management mode of the pipe and plane.
 *
 * On power-up: DPLL first, then plane, then pipe, then reload the LUT.
 * On power-down the order is reversed: pipe, plane, then DPLL, with
 * vblank waits so each disable takes effect before the next step.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
	int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		/* Enable the DPLL */
		temp = REG_READ(dpll_reg);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(dpll_reg, temp);
			REG_READ(dpll_reg);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
			REG_READ(dpll_reg);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			/* NOTE(review): VCO enable is written twice with a
			 * settle delay after each write; presumably mirrors
			 * the BIOS sequence -- confirm before simplifying. */
			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
			REG_READ(dpll_reg);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Jim Bish - switch plan and pipe per scott */
		/* Enable the plane */
		temp = REG_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(dspcntr_reg,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);

		/* Reload the gamma/palette now that the pipe is running. */
		psb_intel_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Jim Bish - changed pipe/plane here as well. */

		/* Wait for vblank for the disable to take effect */
		cdv_intel_wait_for_vblank(dev);

		/* Next, disable display pipes */
		temp = REG_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
			REG_READ(pipeconf_reg);
		}

		/* Wait for vblank for the disable to take effect. */
		cdv_intel_wait_for_vblank(dev);

		udelay(150);

		/* Disable display plane */
		temp = REG_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(dspcntr_reg,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
			REG_READ(dspbase_reg);
		}

		/* Finally shut the DPLL down. */
		temp = REG_READ(dpll_reg);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
			REG_READ(dpll_reg);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}
	/*Set FIFO Watermarks*/
	REG_WRITE(DSPARB, 0x3F3E);
}
/* Helper-library prepare hook: blank the pipe before a modeset. */
static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;

	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
/* Helper-library commit hook: light the pipe back up after a modeset. */
static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;

	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
/* No mode adjustments are needed on CDV; accept every mode as-is. */
static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	return true;
}
/**
 * Return the pipe currently connected to the panel fitter,
 * or -1 if the panel fitter is not present or not in use
 */
static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
{
	u32 pfit = REG_READ(PFIT_CONTROL);

	/* Fitter disabled? Then no pipe owns it. */
	if (!(pfit & PFIT_ENABLE))
		return -1;

	/* The owning pipe lives in the two bits at position 29. */
	return (pfit >> 29) & 0x3;
}
/*
 * cdv_intel_crtc_mode_set - program a full mode on the given CRTC.
 *
 * Chooses a reference clock based on the output type, searches for PLL
 * dividers, programs the DPLL over the sideband bus, powers the LVDS
 * pins when needed, then writes all the timing, size and pipe-source
 * registers before handing off to mode_set_base for the plane.
 *
 * Returns 0 (also on PLL-search failure, which only logs an error) or
 * -EBUSY if the DPLL never reports lock.
 */
static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
	int refclk;
	struct cdv_intel_clock_t clock;
	u32 dpll = 0, dspcntr, pipeconf;
	bool ok;
	bool is_crt = false, is_lvds = false, is_tv = false;
	bool is_hdmi = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* Classify the output type(s) currently driven by this CRTC. */
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct psb_intel_encoder *psb_intel_encoder =
					psb_intel_attached_encoder(connector);

		if (!connector->encoder
		    || connector->encoder->crtc != crtc)
			continue;

		switch (psb_intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_HDMI:
			is_hdmi = true;
			break;
		}
	}

	refclk = 96000;

	/* Hack selection about ref clk for CRT */
	/* Select 27MHz as the reference clk for HDMI */
	if (is_crt || is_hdmi)
		refclk = 27000;

	drm_mode_debug_printmodeline(adjusted_mode);

	ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
				 &clock);
	if (!ok) {
		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
		return 0;
	}

	dpll = DPLL_VGA_MODE_DIS;
	if (is_tv) {
		/* XXX: just matching BIOS for now */
/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	}
	dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_SYNCLOCK_ENABLE;
	dpll |= DPLL_VGA_MODE_DIS;
	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	/* dpll |= (2 << 11); */

	/* setup pipeconf */
	pipeconf = REG_READ(pipeconf_reg);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr |= DISPPLANE_SEL_PIPE_A;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	dspcntr |= DISPLAY_PLANE_ENABLE;
	pipeconf |= PIPEACONF_ENABLE;

	/* Turn on the refclk so the sideband DPLL registers can be
	 * programmed, but do not enable the VCO yet. */
	REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
	REG_READ(dpll_reg);

	cdv_dpll_set_clock_cdv(dev, crtc, &clock);

	udelay(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		u32 lvds = REG_READ(LVDS);

		lvds |=
		    LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
		    LVDS_PIPEB_SELECT;
		/* Set the B0-B3 data pairs corresponding to
		 * whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more
		 * thoroughly into how panels behave in the two modes.
		 */

		REG_WRITE(LVDS, lvds);
		REG_READ(LVDS);
	}

	dpll |= DPLL_VCO_ENABLE;

	/* Disable the panel fitter if it was on our pipe */
	if (cdv_intel_panel_fitter_pipe(dev) == pipe)
		REG_WRITE(PFIT_CONTROL, 0);

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Now enable the VCO and wait for the PLL to report lock. */
	REG_WRITE(dpll_reg,
		(REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
	REG_READ(dpll_reg);
	/* Wait for the clocks to stabilize. */
	udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
	if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
		dev_err(dev->dev, "Failed to get DPLL lock\n");
		return -EBUSY;
	}

	{
		/* Program the pixel multiplier (derived from the clock
		 * ratio) into the DPLL MD register. */
		int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
		REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
	}

	/* Timing registers: low 16 bits are the active/start value,
	 * high 16 bits the total/end value, all stored minus one. */
	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
		  ((adjusted_mode->crtc_htotal - 1) << 16));
	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
		  ((adjusted_mode->crtc_vtotal - 1) << 16));
	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	REG_WRITE(dspsize_reg,
		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(dsppos_reg, 0);
	REG_WRITE(pipesrc_reg,
		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(pipeconf_reg, pipeconf);
	REG_READ(pipeconf_reg);

	cdv_intel_wait_for_vblank(dev);

	REG_WRITE(dspcntr_reg, dspcntr);

	/* Flush the plane changes */
	{
		struct drm_crtc_helper_funcs *crtc_funcs =
		    crtc->helper_private;
		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
	}

	cdv_intel_wait_for_vblank(dev);

	return 0;
}
/** Loads the palette/gamma unit for the CRTC with the prepared values.
 *
 * Each 32-bit palette entry packs R:G:B into bits 23:16, 15:8 and 7:0,
 * with the per-entry adjustment added to every channel. If the device
 * cannot be powered up, the values are stashed in the saved-register
 * area instead so a later restore can apply them.
 */
static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv =
				(struct drm_psb_private *)dev->dev_private;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int palreg = PALETTE_A;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	switch (psb_intel_crtc->pipe) {
	case 0:
		break;
	case 1:
		palreg = PALETTE_B;
		break;
	case 2:
		palreg = PALETTE_C;
		break;
	default:
		dev_err(dev->dev, "Illegal Pipe Number.\n");
		return;
	}

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  ((psb_intel_crtc->lut_r[i] +
				  psb_intel_crtc->lut_adj[i]) << 16) |
				  ((psb_intel_crtc->lut_g[i] +
				  psb_intel_crtc->lut_adj[i]) << 8) |
				  (psb_intel_crtc->lut_b[i] +
				  psb_intel_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		/* NOTE(review): the powered-off fallback always writes
		 * save_palette_a, regardless of which pipe this CRTC is
		 * on -- confirm whether pipes B/C need their own slots. */
		for (i = 0; i < 256; i++) {
			dev_priv->regs.psb.save_palette_a[i] =
				  ((psb_intel_crtc->lut_r[i] +
				  psb_intel_crtc->lut_adj[i]) << 16) |
				  ((psb_intel_crtc->lut_g[i] +
				  psb_intel_crtc->lut_adj[i]) << 8) |
				  (psb_intel_crtc->lut_b[i] +
				  psb_intel_crtc->lut_adj[i]);
		}

	}
}
/**
 * Save HW states of the given crtc into its crtc_state, covering plane
 * control, pipe configuration, DPLL/FP dividers, all six timing
 * registers, plane geometry and the full 256-entry palette.
 */
static void cdv_intel_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	/* struct drm_psb_private *dev_priv =
			(struct drm_psb_private *)dev->dev_private; */
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
	int pipeA = (psb_intel_crtc->pipe == 0);
	uint32_t paletteReg;
	int i;

	if (!crtc_state) {
		dev_dbg(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);

	/*NOTE: DSPSIZE DSPPOS only for psb*/
	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);

	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);

	DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
			crtc_state->saveDSPCNTR,
			crtc_state->savePIPECONF,
			crtc_state->savePIPESRC,
			crtc_state->saveFP0,
			crtc_state->saveFP1,
			crtc_state->saveDPLL,
			crtc_state->saveHTOTAL,
			crtc_state->saveHBLANK,
			crtc_state->saveHSYNC,
			crtc_state->saveVTOTAL,
			crtc_state->saveVBLANK,
			crtc_state->saveVSYNC,
			crtc_state->saveDSPSTRIDE,
			crtc_state->saveDSPSIZE,
			crtc_state->saveDSPPOS,
			crtc_state->saveDSPBASE
		);

	/* Palette entries are 32-bit words at 4-byte stride. */
	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
/**
 * Restore HW states of the given crtc from its crtc_state.
 *
 * Replays the saved registers in dependency order: DPLL is first
 * disabled (if it was enabled when saved), FP dividers and DPLL are
 * written with settle delays, then timings, geometry, pipe config and
 * finally plane control and palette, with vblank waits between the
 * pipe- and plane-affecting writes.
 */
static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	/* struct drm_psb_private * dev_priv =
				(struct drm_psb_private *)dev->dev_private; */
	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
	int pipeA = (psb_intel_crtc->pipe == 0);
	uint32_t paletteReg;
	int i;

	if (!crtc_state) {
		dev_dbg(dev->dev, "No crtc state\n");
		return;
	}

	DRM_DEBUG(
		"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
		REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
		REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
		REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
		REG_READ(pipeA ? FPA0 : FPB0),
		REG_READ(pipeA ? FPA1 : FPB1),
		REG_READ(pipeA ? DPLL_A : DPLL_B),
		REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
		REG_READ(pipeA ? HBLANK_A : HBLANK_B),
		REG_READ(pipeA ? HSYNC_A : HSYNC_B),
		REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
		REG_READ(pipeA ? VBLANK_A : VBLANK_B),
		REG_READ(pipeA ? VSYNC_A : VSYNC_B),
		REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
		REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
		REG_READ(pipeA ? DSPAPOS : DSPBPOS),
		REG_READ(pipeA ? DSPABASE : DSPBBASE)
		);

	DRM_DEBUG(
		"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
		crtc_state->saveDSPCNTR,
		crtc_state->savePIPECONF,
		crtc_state->savePIPESRC,
		crtc_state->saveFP0,
		crtc_state->saveFP1,
		crtc_state->saveDPLL,
		crtc_state->saveHTOTAL,
		crtc_state->saveHBLANK,
		crtc_state->saveHSYNC,
		crtc_state->saveVTOTAL,
		crtc_state->saveVBLANK,
		crtc_state->saveVSYNC,
		crtc_state->saveDSPSTRIDE,
		crtc_state->saveDSPSIZE,
		crtc_state->saveDSPPOS,
		crtc_state->saveDSPBASE
		);

	/* If the DPLL was running, disable it before touching dividers. */
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(pipeA ? DPLL_A : DPLL_B);
		DRM_DEBUG("write dpll: %x\n",
				REG_READ(pipeA ? DPLL_A : DPLL_B));
		udelay(150);
	}

	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
	REG_READ(pipeA ? FPA0 : FPB0);

	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
	REG_READ(pipeA ? FPA1 : FPB1);

	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
	REG_READ(pipeA ? DPLL_A : DPLL_B);
	udelay(150);	/* let the restored clock stabilize */

	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);

	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);

	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);

	cdv_intel_wait_for_vblank(dev);

	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);

	cdv_intel_wait_for_vblank(dev);

	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
	for (i = 0; i < 256; ++i)
		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
/*
 * cdv_intel_crtc_cursor_set - install a GEM object as the HW cursor image.
 * @crtc: CRTC to change
 * @file_priv: DRM file the handle belongs to
 * @handle: GEM handle of the cursor image; 0 disables the cursor
 * @width, @height: cursor dimensions; only 64x64 is supported
 *
 * Pins the new cursor image into the GTT, points the cursor registers at
 * it and releases the previously installed image, if any.
 *
 * Returns 0 on success or a negative errno.
 */
static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file_priv,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct drm_gem_object *obj;
	int ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		/* turn off the cursor */
		temp = CURSOR_MODE_DISABLE;

		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* unpin the old GEM object */
		if (psb_intel_crtc->cursor_obj) {
			gt = container_of(psb_intel_crtc->cursor_obj,
							struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
			psb_intel_crtc->cursor_obj = NULL;
		}

		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "buffer is too small\n");
		ret = -ENOMEM;
		goto unref;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref;
	}

	addr = gt->offset;	/* Or resource.start ??? */

	psb_intel_crtc->cursor_addr = addr;
	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old GEM object */
	if (psb_intel_crtc->cursor_obj) {
		gt = container_of(psb_intel_crtc->cursor_obj,
							struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
	}
	/* Track the new cursor unconditionally: the old code only did
	 * this when a previous cursor existed, so the very first cursor
	 * object was never remembered (and never unpinned/released). */
	psb_intel_crtc->cursor_obj = obj;

	return 0;
unref:
	/* Drop the reference taken by the lookup; these error paths
	 * previously leaked it. */
	drm_gem_object_unreference(obj);
	return ret;
}
/*
 * cdv_intel_crtc_cursor_move - reposition the hardware cursor.
 *
 * The position register takes sign-magnitude coordinates: a sign bit
 * plus the absolute value for each axis. The base register is rewritten
 * with the current cursor address to latch the new position.
 */
static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	uint32_t pos = 0;

	if (x < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	if (y < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}

	pos |= (x & CURSOR_POS_MASK) << CURSOR_X_SHIFT;
	pos |= (y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
		REG_WRITE(pipe == 0 ? CURABASE : CURBBASE,
			  psb_intel_crtc->cursor_addr);
		gma_power_end(dev);
	}

	return 0;
}
/*
 * cdv_intel_crtc_gamma_set - update the CRTC's gamma ramp.
 *
 * Copies the high byte of each 16-bit channel value into the per-CRTC
 * lookup tables for entries [start, start+size), clamped to 256, then
 * pushes the tables to hardware.
 */
static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
			u16 *green, u16 *blue, uint32_t start, uint32_t size)
{
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int last = start + size;
	int i;

	if (last > 256)
		last = 256;

	for (i = start; i < last; i++) {
		psb_intel_crtc->lut_r[i] = red[i] >> 8;
		psb_intel_crtc->lut_g[i] = green[i] >> 8;
		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	cdv_intel_crtc_load_lut(crtc);
}
/*
 * cdv_crtc_set_config - set_config wrapper that blocks runtime suspend.
 *
 * When runtime PM is active the device must not suspend in the middle of
 * a modeset, so the helper call is bracketed with forbid/allow.
 */
static int cdv_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	/* Without runtime PM there is nothing to guard against. */
	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
/* FIXME: why are we using this, should it be cdv_ in this tree ? */
static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
{
	int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);	/* 8xx M formula */
	int p = clock->p1 * clock->p2;

	clock->m = m;
	clock->p = p;
	clock->vco = refclk * m / (clock->n + 2);
	clock->dot = clock->vco / p;
}
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Reads the DPLL and FP divider registers (live if the device can be
 * powered up, otherwise from the saved-register copies), decodes the
 * m1/m2/n/p1/p2 dividers and reconstructs the dot clock with the 8xx
 * formula. The result is in kHz, matching the refclk values passed to
 * i8xx_clock().
 */
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
				struct drm_crtc *crtc)
{
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	u32 dpll;
	u32 fp;
	struct cdv_intel_clock_t clock;
	bool is_lvds;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (gma_power_begin(dev, false)) {
		dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
		/* The active FP register is selected by the rate-select bit. */
		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
			fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
		else
			fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
		gma_power_end(dev);
	} else {
		/* Device is powered down: fall back to the saved copies. */
		dpll = (pipe == 0) ?
			dev_priv->regs.psb.saveDPLL_A :
			dev_priv->regs.psb.saveDPLL_B;
		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
			fp = (pipe == 0) ?
				dev_priv->regs.psb.saveFPA0 :
				dev_priv->regs.psb.saveFPB0;
		else
			fp = (pipe == 0) ?
				dev_priv->regs.psb.saveFPA1 :
				dev_priv->regs.psb.saveFPB1;
		is_lvds = (pipe == 1) &&
				(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
	}

	/* Unpack the feedback and reference dividers from the FP register. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;

	if (is_lvds) {
		/* LVDS p1 is one-hot encoded in the DPLL register. */
		clock.p1 =
		    ffs((dpll &
			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
			DPLL_FPA01_P1_POST_DIV_SHIFT);
		if (clock.p1 == 0) {
			clock.p1 = 4;
			dev_err(dev->dev, "PLL %d\n", dpll);
		}
		clock.p2 = 14;

		if ((dpll & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			/* XXX: might not be 66MHz */
			i8xx_clock(66000, &clock);
		} else
			i8xx_clock(48000, &clock);
	} else {
		if (dpll & PLL_P1_DIVIDE_BY_TWO)
			clock.p1 = 2;
		else {
			clock.p1 =
			    ((dpll &
			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
		}
		if (dpll & PLL_P2_DIVIDE_BY_4)
			clock.p2 = 4;
		else
			clock.p2 = 2;

		i8xx_clock(48000, &clock);
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the horizontal/vertical total and sync registers (live when the
 * device can be powered up, otherwise from the saved copies), decodes
 * the minus-one packed values into a freshly allocated drm_display_mode
 * and fills in the dot clock via cdv_intel_crtc_clock_get().
 *
 * Returns the new mode, or NULL on allocation failure. The caller owns
 * the returned mode and must free it.
 */
struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot;
	int hsync;
	int vtot;
	int vsync;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (gma_power_begin(dev, false)) {
		htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
		hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
		vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
		vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
		gma_power_end(dev);
	} else {
		/* Device is powered down: use the saved register copies. */
		htot = (pipe == 0) ?
			dev_priv->regs.psb.saveHTOTAL_A :
			dev_priv->regs.psb.saveHTOTAL_B;
		hsync = (pipe == 0) ?
			dev_priv->regs.psb.saveHSYNC_A :
			dev_priv->regs.psb.saveHSYNC_B;
		vtot = (pipe == 0) ?
			dev_priv->regs.psb.saveVTOTAL_A :
			dev_priv->regs.psb.saveVTOTAL_B;
		vsync = (pipe == 0) ?
			dev_priv->regs.psb.saveVSYNC_A :
			dev_priv->regs.psb.saveVSYNC_B;
	}

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
	/* Registers store (value - 1); low word = active/start,
	 * high word = total/end. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
/* Free the CRTC wrapper: drop the saved state, then the framework object. */
static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct psb_intel_crtc *gma_crtc = to_psb_intel_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}
/* CRTC helper vtable consumed by the DRM mode-setting helper library. */
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
	.dpms = cdv_intel_crtc_dpms,
	.mode_fixup = cdv_intel_crtc_mode_fixup,
	.mode_set = cdv_intel_crtc_mode_set,
	.mode_set_base = cdv_intel_pipe_set_base,
	.prepare = cdv_intel_crtc_prepare,
	.commit = cdv_intel_crtc_commit,
};
/* Core CRTC operations (save/restore, cursor, gamma, config, teardown). */
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
	.save = cdv_intel_crtc_save,
	.restore = cdv_intel_crtc_restore,
	.cursor_set = cdv_intel_crtc_cursor_set,
	.cursor_move = cdv_intel_crtc_cursor_move,
	.gamma_set = cdv_intel_crtc_gamma_set,
	.set_config = cdv_crtc_set_config,
	.destroy = cdv_intel_crtc_destroy,
};
| gpl-2.0 |
wrxtasy/linux | arch/sparc/lib/bitext.c | 4324 | 2915 | /*
* bitext.c: kernel little helper (of bit shuffling variety).
*
* Copyright (C) 2002 Pete Zaitcev <zaitcev@yahoo.com>
*
* The algorithm to search a zero bit string is geared towards its application.
* We expect a couple of fixed sizes of requests, so a rotating counter, reset
* by align size, should provide fast enough search while maintaining low
* fragmentation.
*/
#include <linux/string.h>
#include <linux/bitmap.h>
#include <asm/bitext.h>
/**
 * bit_map_string_get - find and set a bit string in bit map.
 * @t: the bit map.
 * @len: requested string length
 * @align: requested alignment
 *
 * Returns offset in the map or -1 if out of space.
 *
 * Not safe to call from an interrupt (uses spin_lock).
 */
int bit_map_string_get(struct bit_map *t, int len, int align)
{
	int offset, count;	/* siamese twins */
	int off_new;
	int align1;
	int i, color;

	if (t->num_colors) {
		/* align is overloaded to be the page color */
		color = align;
		align = t->num_colors;
	} else {
		color = 0;
		if (align == 0)
			align = 1;
	}
	align1 = align - 1;
	/* align must be a power of two and within the map */
	if ((align & align1) != 0)
		BUG();
	if (align < 0 || align >= t->size)
		BUG();
	if (len <= 0 || len > t->size)
		BUG();
	color &= align1;

	spin_lock(&t->lock);
	/*
	 * Pick a starting point: small requests resume from the first
	 * free bit, larger ones from the (aligned-down) rotating cursor.
	 */
	if (len < t->last_size)
		offset = t->first_free;
	else
		offset = t->last_off & ~align1;
	count = 0;
	for (;;) {
		off_new = find_next_zero_bit(t->map, t->size, offset);
		/* round up to the alignment, then add the page color */
		off_new = ((off_new + align1) & ~align1) + color;
		count += off_new - offset;
		offset = off_new;
		if (offset >= t->size)
			offset = 0;
		/* scanned at least one full revolution: out of space */
		if (count + len > t->size) {
			spin_unlock(&t->lock);
			/* P3 */ printk(KERN_ERR
			"bitmap out: size %d used %d off %d len %d align %d count %d\n",
			t->size, t->used, offset, len, align, count);
			return -1;
		}
		/* candidate string would run off the end: wrap and retry */
		if (offset + len > t->size) {
			count += t->size - offset;
			offset = 0;
			continue;
		}
		/* verify all len bits starting at offset are clear */
		i = 0;
		while (test_bit(offset + i, t->map) == 0) {
			i++;
			if (i == len) {
				/* success: claim the string, update hints */
				bitmap_set(t->map, offset, len);
				if (offset == t->first_free)
					t->first_free = find_next_zero_bit
						(t->map, t->size,
						 t->first_free + len);
				if ((t->last_off = offset + len) >= t->size)
					t->last_off = 0;
				t->used += len;
				t->last_size = len;
				spin_unlock(&t->lock);
				return offset;
			}
		}
		/* hit a set bit at offset + i: skip just past it */
		count += i + 1;
		if ((offset += i + 1) >= t->size)
			offset = 0;
	}
}
/*
 * Release a previously allocated bit string of @len bits at @offset.
 * BUGs if any bit in the range is not currently set.
 */
void bit_map_clear(struct bit_map *t, int offset, int len)
{
	int bit;

	if (t->used < len)
		BUG();	/* Much too late to do any good, but alas... */
	spin_lock(&t->lock);
	for (bit = offset; bit < offset + len; bit++) {
		if (!test_bit(bit, t->map))
			BUG();
		__clear_bit(bit, t->map);
	}
	if (offset < t->first_free)
		t->first_free = offset;
	t->used -= len;
	spin_unlock(&t->lock);
}
/* Initialize @t over an externally supplied, @size-bit backing @map. */
void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{
	/* Zero the bookkeeping structure, then attach the cleared bitmap. */
	memset(t, 0, sizeof(*t));
	spin_lock_init(&t->lock);
	bitmap_zero(map, size);
	t->map = map;
	t->size = size;
}
| gpl-2.0 |
Kiffmet/FireWork-Kernel-GT-i9506 | drivers/net/ethernet/micrel/ksz884x.c | 4836 | 183903 | /**
* drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
*
* Copyright (c) 2009-2010 Micrel, Inc.
* Tristram Ha <Tristram.Ha@micrel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/slab.h>
/* DMA Registers */
#define KS_DMA_TX_CTRL 0x0000
#define DMA_TX_ENABLE 0x00000001
#define DMA_TX_CRC_ENABLE 0x00000002
#define DMA_TX_PAD_ENABLE 0x00000004
#define DMA_TX_LOOPBACK 0x00000100
#define DMA_TX_FLOW_ENABLE 0x00000200
#define DMA_TX_CSUM_IP 0x00010000
#define DMA_TX_CSUM_TCP 0x00020000
#define DMA_TX_CSUM_UDP 0x00040000
#define DMA_TX_BURST_SIZE 0x3F000000
#define KS_DMA_RX_CTRL 0x0004
#define DMA_RX_ENABLE 0x00000001
#define KS884X_DMA_RX_MULTICAST 0x00000002
#define DMA_RX_PROMISCUOUS 0x00000004
#define DMA_RX_ERROR 0x00000008
#define DMA_RX_UNICAST 0x00000010
#define DMA_RX_ALL_MULTICAST 0x00000020
#define DMA_RX_BROADCAST 0x00000040
#define DMA_RX_FLOW_ENABLE 0x00000200
#define DMA_RX_CSUM_IP 0x00010000
#define DMA_RX_CSUM_TCP 0x00020000
#define DMA_RX_CSUM_UDP 0x00040000
#define DMA_RX_BURST_SIZE 0x3F000000
#define DMA_BURST_SHIFT 24
#define DMA_BURST_DEFAULT 8
#define KS_DMA_TX_START 0x0008
#define KS_DMA_RX_START 0x000C
#define DMA_START 0x00000001
#define KS_DMA_TX_ADDR 0x0010
#define KS_DMA_RX_ADDR 0x0014
#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
#define DMA_ADDR_LIST_SHIFT 2
/* MTR0 */
#define KS884X_MULTICAST_0_OFFSET 0x0020
#define KS884X_MULTICAST_1_OFFSET 0x0021
#define KS884X_MULTICAST_2_OFFSET 0x0022
#define KS884x_MULTICAST_3_OFFSET 0x0023
/* MTR1 */
#define KS884X_MULTICAST_4_OFFSET 0x0024
#define KS884X_MULTICAST_5_OFFSET 0x0025
#define KS884X_MULTICAST_6_OFFSET 0x0026
#define KS884X_MULTICAST_7_OFFSET 0x0027
/* Interrupt Registers */
/* INTEN */
#define KS884X_INTERRUPTS_ENABLE 0x0028
/* INTST */
#define KS884X_INTERRUPTS_STATUS 0x002C
#define KS884X_INT_RX_STOPPED 0x02000000
#define KS884X_INT_TX_STOPPED 0x04000000
#define KS884X_INT_RX_OVERRUN 0x08000000
#define KS884X_INT_TX_EMPTY 0x10000000
#define KS884X_INT_RX 0x20000000
#define KS884X_INT_TX 0x40000000
#define KS884X_INT_PHY 0x80000000
#define KS884X_INT_RX_MASK \
(KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
#define KS884X_INT_TX_MASK \
(KS884X_INT_TX | KS884X_INT_TX_EMPTY)
#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
/* MAC Additional Station Address */
/* MAAL0 */
#define KS_ADD_ADDR_0_LO 0x0080
/* MAAH0 */
#define KS_ADD_ADDR_0_HI 0x0084
/* MAAL1 */
#define KS_ADD_ADDR_1_LO 0x0088
/* MAAH1 */
#define KS_ADD_ADDR_1_HI 0x008C
/* MAAL2 */
#define KS_ADD_ADDR_2_LO 0x0090
/* MAAH2 */
#define KS_ADD_ADDR_2_HI 0x0094
/* MAAL3 */
#define KS_ADD_ADDR_3_LO 0x0098
/* MAAH3 */
#define KS_ADD_ADDR_3_HI 0x009C
/* MAAL4 */
#define KS_ADD_ADDR_4_LO 0x00A0
/* MAAH4 */
#define KS_ADD_ADDR_4_HI 0x00A4
/* MAAL5 */
#define KS_ADD_ADDR_5_LO 0x00A8
/* MAAH5 */
#define KS_ADD_ADDR_5_HI 0x00AC
/* MAAL6 */
#define KS_ADD_ADDR_6_LO 0x00B0
/* MAAH6 */
#define KS_ADD_ADDR_6_HI 0x00B4
/* MAAL7 */
#define KS_ADD_ADDR_7_LO 0x00B8
/* MAAH7 */
#define KS_ADD_ADDR_7_HI 0x00BC
/* MAAL8 */
#define KS_ADD_ADDR_8_LO 0x00C0
/* MAAH8 */
#define KS_ADD_ADDR_8_HI 0x00C4
/* MAAL9 */
#define KS_ADD_ADDR_9_LO 0x00C8
/* MAAH9 */
#define KS_ADD_ADDR_9_HI 0x00CC
/* MAAL10 */
#define KS_ADD_ADDR_A_LO 0x00D0
/* MAAH10 */
#define KS_ADD_ADDR_A_HI 0x00D4
/* MAAL11 */
#define KS_ADD_ADDR_B_LO 0x00D8
/* MAAH11 */
#define KS_ADD_ADDR_B_HI 0x00DC
/* MAAL12 */
#define KS_ADD_ADDR_C_LO 0x00E0
/* MAAH12 */
#define KS_ADD_ADDR_C_HI 0x00E4
/* MAAL13 */
#define KS_ADD_ADDR_D_LO 0x00E8
/* MAAH13 */
#define KS_ADD_ADDR_D_HI 0x00EC
/* MAAL14 */
#define KS_ADD_ADDR_E_LO 0x00F0
/* MAAH14 */
#define KS_ADD_ADDR_E_HI 0x00F4
/* MAAL15 */
#define KS_ADD_ADDR_F_LO 0x00F8
/* MAAH15 */
#define KS_ADD_ADDR_F_HI 0x00FC
#define ADD_ADDR_HI_MASK 0x0000FFFF
#define ADD_ADDR_ENABLE 0x80000000
#define ADD_ADDR_INCR 8
/* Miscellaneous Registers */
/* MARL */
#define KS884X_ADDR_0_OFFSET 0x0200
#define KS884X_ADDR_1_OFFSET 0x0201
/* MARM */
#define KS884X_ADDR_2_OFFSET 0x0202
#define KS884X_ADDR_3_OFFSET 0x0203
/* MARH */
#define KS884X_ADDR_4_OFFSET 0x0204
#define KS884X_ADDR_5_OFFSET 0x0205
/* OBCR */
#define KS884X_BUS_CTRL_OFFSET 0x0210
#define BUS_SPEED_125_MHZ 0x0000
#define BUS_SPEED_62_5_MHZ 0x0001
#define BUS_SPEED_41_66_MHZ 0x0002
#define BUS_SPEED_25_MHZ 0x0003
/* EEPCR */
#define KS884X_EEPROM_CTRL_OFFSET 0x0212
#define EEPROM_CHIP_SELECT 0x0001
#define EEPROM_SERIAL_CLOCK 0x0002
#define EEPROM_DATA_OUT 0x0004
#define EEPROM_DATA_IN 0x0008
#define EEPROM_ACCESS_ENABLE 0x0010
/* MBIR */
#define KS884X_MEM_INFO_OFFSET 0x0214
#define RX_MEM_TEST_FAILED 0x0008
#define RX_MEM_TEST_FINISHED 0x0010
#define TX_MEM_TEST_FAILED 0x0800
#define TX_MEM_TEST_FINISHED 0x1000
/* GCR */
#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
#define GLOBAL_SOFTWARE_RESET 0x0001
#define KS8841_POWER_MANAGE_OFFSET 0x0218
/* WFCR */
#define KS8841_WOL_CTRL_OFFSET 0x021A
#define KS8841_WOL_MAGIC_ENABLE 0x0080
#define KS8841_WOL_FRAME3_ENABLE 0x0008
#define KS8841_WOL_FRAME2_ENABLE 0x0004
#define KS8841_WOL_FRAME1_ENABLE 0x0002
#define KS8841_WOL_FRAME0_ENABLE 0x0001
/* WF0 */
#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
/* IACR */
#define KS884X_IACR_P 0x04A0
#define KS884X_IACR_OFFSET KS884X_IACR_P
/* IADR1 */
#define KS884X_IADR1_P 0x04A2
#define KS884X_IADR2_P 0x04A4
#define KS884X_IADR3_P 0x04A6
#define KS884X_IADR4_P 0x04A8
#define KS884X_IADR5_P 0x04AA
#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
/* P1MBCR */
#define KS884X_P1MBCR_P 0x04D0
#define KS884X_P1MBSR_P 0x04D2
#define KS884X_PHY1ILR_P 0x04D4
#define KS884X_PHY1IHR_P 0x04D6
#define KS884X_P1ANAR_P 0x04D8
#define KS884X_P1ANLPR_P 0x04DA
/* P2MBCR */
#define KS884X_P2MBCR_P 0x04E0
#define KS884X_P2MBSR_P 0x04E2
#define KS884X_PHY2ILR_P 0x04E4
#define KS884X_PHY2IHR_P 0x04E6
#define KS884X_P2ANAR_P 0x04E8
#define KS884X_P2ANLPR_P 0x04EA
#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
#define KS884X_PHY_CTRL_OFFSET 0x00
/* Mode Control Register */
#define PHY_REG_CTRL 0
#define PHY_RESET 0x8000
#define PHY_LOOPBACK 0x4000
#define PHY_SPEED_100MBIT 0x2000
#define PHY_AUTO_NEG_ENABLE 0x1000
#define PHY_POWER_DOWN 0x0800
#define PHY_MII_DISABLE 0x0400
#define PHY_AUTO_NEG_RESTART 0x0200
#define PHY_FULL_DUPLEX 0x0100
#define PHY_COLLISION_TEST 0x0080
#define PHY_HP_MDIX 0x0020
#define PHY_FORCE_MDIX 0x0010
#define PHY_AUTO_MDIX_DISABLE 0x0008
#define PHY_REMOTE_FAULT_DISABLE 0x0004
#define PHY_TRANSMIT_DISABLE 0x0002
#define PHY_LED_DISABLE 0x0001
#define KS884X_PHY_STATUS_OFFSET 0x02
/* Mode Status Register */
#define PHY_REG_STATUS 1
#define PHY_100BT4_CAPABLE 0x8000
#define PHY_100BTX_FD_CAPABLE 0x4000
#define PHY_100BTX_CAPABLE 0x2000
#define PHY_10BT_FD_CAPABLE 0x1000
#define PHY_10BT_CAPABLE 0x0800
#define PHY_MII_SUPPRESS_CAPABLE 0x0040
#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
#define PHY_REMOTE_FAULT 0x0010
#define PHY_AUTO_NEG_CAPABLE 0x0008
#define PHY_LINK_STATUS 0x0004
#define PHY_JABBER_DETECT 0x0002
#define PHY_EXTENDED_CAPABILITY 0x0001
#define KS884X_PHY_ID_1_OFFSET 0x04
#define KS884X_PHY_ID_2_OFFSET 0x06
/* PHY Identifier Registers */
#define PHY_REG_ID_1 2
#define PHY_REG_ID_2 3
#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
/* Auto-Negotiation Advertisement Register */
#define PHY_REG_AUTO_NEGOTIATION 4
#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
/* Not supported. */
#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
#define PHY_AUTO_NEG_100BT4 0x0200
#define PHY_AUTO_NEG_100BTX_FD 0x0100
#define PHY_AUTO_NEG_100BTX 0x0080
#define PHY_AUTO_NEG_10BT_FD 0x0040
#define PHY_AUTO_NEG_10BT 0x0020
#define PHY_AUTO_NEG_SELECTOR 0x001F
#define PHY_AUTO_NEG_802_3 0x0001
#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
/* Auto-Negotiation Link Partner Ability Register */
#define PHY_REG_REMOTE_CAPABILITY 5
#define PHY_REMOTE_NEXT_PAGE 0x8000
#define PHY_REMOTE_ACKNOWLEDGE 0x4000
#define PHY_REMOTE_REMOTE_FAULT 0x2000
#define PHY_REMOTE_SYM_PAUSE 0x0400
#define PHY_REMOTE_100BTX_FD 0x0100
#define PHY_REMOTE_100BTX 0x0080
#define PHY_REMOTE_10BT_FD 0x0040
#define PHY_REMOTE_10BT 0x0020
/* P1VCT */
#define KS884X_P1VCT_P 0x04F0
#define KS884X_P1PHYCTRL_P 0x04F2
/* P2VCT */
#define KS884X_P2VCT_P 0x04F4
#define KS884X_P2PHYCTRL_P 0x04F6
#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
#define KS884X_PHY_LINK_MD_OFFSET 0x00
#define PHY_START_CABLE_DIAG 0x8000
#define PHY_CABLE_DIAG_RESULT 0x6000
#define PHY_CABLE_STAT_NORMAL 0x0000
#define PHY_CABLE_STAT_OPEN 0x2000
#define PHY_CABLE_STAT_SHORT 0x4000
#define PHY_CABLE_STAT_FAILED 0x6000
#define PHY_CABLE_10M_SHORT 0x1000
#define PHY_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
#define PHY_STAT_REVERSED_POLARITY 0x0020
#define PHY_STAT_MDIX 0x0010
#define PHY_FORCE_LINK 0x0008
#define PHY_POWER_SAVING_DISABLE 0x0004
#define PHY_REMOTE_LOOPBACK 0x0002
/* SIDER */
#define KS884X_SIDER_P 0x0400
#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
#define REG_FAMILY_ID 0x88
#define REG_CHIP_ID_41 0x8810
#define REG_CHIP_ID_42 0x8800
#define KS884X_CHIP_ID_MASK_41 0xFF10
#define KS884X_CHIP_ID_MASK 0xFFF0
#define KS884X_CHIP_ID_SHIFT 4
#define KS884X_REVISION_MASK 0x000E
#define KS884X_REVISION_SHIFT 1
#define KS8842_START 0x0001
#define CHIP_IP_41_M 0x8810
#define CHIP_IP_42_M 0x8800
#define CHIP_IP_61_M 0x8890
#define CHIP_IP_62_M 0x8880
#define CHIP_IP_41_P 0x8850
#define CHIP_IP_42_P 0x8840
#define CHIP_IP_61_P 0x88D0
#define CHIP_IP_62_P 0x88C0
/* SGCR1 */
#define KS8842_SGCR1_P 0x0402
#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
#define SWITCH_PASS_ALL 0x8000
#define SWITCH_TX_FLOW_CTRL 0x2000
#define SWITCH_RX_FLOW_CTRL 0x1000
#define SWITCH_CHECK_LENGTH 0x0800
#define SWITCH_AGING_ENABLE 0x0400
#define SWITCH_FAST_AGING 0x0200
#define SWITCH_AGGR_BACKOFF 0x0100
#define SWITCH_PASS_PAUSE 0x0008
#define SWITCH_LINK_AUTO_AGING 0x0001
/* SGCR2 */
#define KS8842_SGCR2_P 0x0404
#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
#define SWITCH_VLAN_ENABLE 0x8000
#define SWITCH_IGMP_SNOOP 0x4000
#define IPV6_MLD_SNOOP_ENABLE 0x2000
#define IPV6_MLD_SNOOP_OPTION 0x1000
#define PRIORITY_SCHEME_SELECT 0x0800
#define SWITCH_MIRROR_RX_TX 0x0100
#define UNICAST_VLAN_BOUNDARY 0x0080
#define MULTICAST_STORM_DISABLE 0x0040
#define SWITCH_BACK_PRESSURE 0x0020
#define FAIR_FLOW_CTRL 0x0010
#define NO_EXC_COLLISION_DROP 0x0008
#define SWITCH_HUGE_PACKET 0x0004
#define SWITCH_LEGAL_PACKET 0x0002
#define SWITCH_BUF_RESERVE 0x0001
/* SGCR3 */
#define KS8842_SGCR3_P 0x0406
#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
#define BROADCAST_STORM_RATE_LO 0xFF00
#define SWITCH_REPEATER 0x0080
#define SWITCH_HALF_DUPLEX 0x0040
#define SWITCH_FLOW_CTRL 0x0020
#define SWITCH_10_MBIT 0x0010
#define SWITCH_REPLACE_NULL_VID 0x0008
#define BROADCAST_STORM_RATE_HI 0x0007
#define BROADCAST_STORM_RATE 0x07FF
/* SGCR4 */
#define KS8842_SGCR4_P 0x0408
/* SGCR5 */
#define KS8842_SGCR5_P 0x040A
#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
#define LED_MODE 0x8200
#define LED_SPEED_DUPLEX_ACT 0x0000
#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
#define LED_DUPLEX_10_100 0x0200
/* SGCR6 */
#define KS8842_SGCR6_P 0x0410
#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
#define KS8842_PRIORITY_MASK 3
#define KS8842_PRIORITY_SHIFT 2
/* SGCR7 */
#define KS8842_SGCR7_P 0x0412
#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
#define SWITCH_UNK_DEF_PORT_3 0x0004
#define SWITCH_UNK_DEF_PORT_2 0x0002
#define SWITCH_UNK_DEF_PORT_1 0x0001
/* MACAR1 */
#define KS8842_MACAR1_P 0x0470
#define KS8842_MACAR2_P 0x0472
#define KS8842_MACAR3_P 0x0474
#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
/* TOSR1 */
#define KS8842_TOSR1_P 0x0480
#define KS8842_TOSR2_P 0x0482
#define KS8842_TOSR3_P 0x0484
#define KS8842_TOSR4_P 0x0486
#define KS8842_TOSR5_P 0x0488
#define KS8842_TOSR6_P 0x048A
#define KS8842_TOSR7_P 0x0490
#define KS8842_TOSR8_P 0x0492
#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
/* P1CR1 */
#define KS8842_P1CR1_P 0x0500
#define KS8842_P1CR2_P 0x0502
#define KS8842_P1VIDR_P 0x0504
#define KS8842_P1CR3_P 0x0506
#define KS8842_P1IRCR_P 0x0508
#define KS8842_P1ERCR_P 0x050A
#define KS884X_P1SCSLMD_P 0x0510
#define KS884X_P1CR4_P 0x0512
#define KS884X_P1SR_P 0x0514
/* P2CR1 */
#define KS8842_P2CR1_P 0x0520
#define KS8842_P2CR2_P 0x0522
#define KS8842_P2VIDR_P 0x0524
#define KS8842_P2CR3_P 0x0526
#define KS8842_P2IRCR_P 0x0528
#define KS8842_P2ERCR_P 0x052A
#define KS884X_P2SCSLMD_P 0x0530
#define KS884X_P2CR4_P 0x0532
#define KS884X_P2SR_P 0x0534
/* P3CR1 */
#define KS8842_P3CR1_P 0x0540
#define KS8842_P3CR2_P 0x0542
#define KS8842_P3VIDR_P 0x0544
#define KS8842_P3CR3_P 0x0546
#define KS8842_P3IRCR_P 0x0548
#define KS8842_P3ERCR_P 0x054A
#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
#define PORT_CTRL_ADDR(port, addr) \
(addr = KS8842_PORT_1_CTRL_1 + (port) * \
(KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
#define KS8842_PORT_CTRL_1_OFFSET 0x00
#define PORT_BROADCAST_STORM 0x0080
#define PORT_DIFFSERV_ENABLE 0x0040
#define PORT_802_1P_ENABLE 0x0020
#define PORT_BASED_PRIORITY_MASK 0x0018
#define PORT_BASED_PRIORITY_BASE 0x0003
#define PORT_BASED_PRIORITY_SHIFT 3
#define PORT_BASED_PRIORITY_0 0x0000
#define PORT_BASED_PRIORITY_1 0x0008
#define PORT_BASED_PRIORITY_2 0x0010
#define PORT_BASED_PRIORITY_3 0x0018
#define PORT_INSERT_TAG 0x0004
#define PORT_REMOVE_TAG 0x0002
#define PORT_PRIO_QUEUE_ENABLE 0x0001
#define KS8842_PORT_CTRL_2_OFFSET 0x02
#define PORT_INGRESS_VLAN_FILTER 0x4000
#define PORT_DISCARD_NON_VID 0x2000
#define PORT_FORCE_FLOW_CTRL 0x1000
#define PORT_BACK_PRESSURE 0x0800
#define PORT_TX_ENABLE 0x0400
#define PORT_RX_ENABLE 0x0200
#define PORT_LEARN_DISABLE 0x0100
#define PORT_MIRROR_SNIFFER 0x0080
#define PORT_MIRROR_RX 0x0040
#define PORT_MIRROR_TX 0x0020
#define PORT_USER_PRIORITY_CEILING 0x0008
#define PORT_VLAN_MEMBERSHIP 0x0007
#define KS8842_PORT_CTRL_VID_OFFSET 0x04
#define PORT_DEFAULT_VID 0x0001
#define KS8842_PORT_CTRL_3_OFFSET 0x06
#define PORT_INGRESS_LIMIT_MODE 0x000C
#define PORT_INGRESS_ALL 0x0000
#define PORT_INGRESS_UNICAST 0x0004
#define PORT_INGRESS_MULTICAST 0x0008
#define PORT_INGRESS_BROADCAST 0x000C
#define PORT_COUNT_IFG 0x0002
#define PORT_COUNT_PREAMBLE 0x0001
#define KS8842_PORT_IN_RATE_OFFSET 0x08
#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
#define PORT_PRIORITY_RATE 0x0F
#define PORT_PRIORITY_RATE_SHIFT 4
#define KS884X_PORT_LINK_MD 0x10
#define PORT_CABLE_10M_SHORT 0x8000
#define PORT_CABLE_DIAG_RESULT 0x6000
#define PORT_CABLE_STAT_NORMAL 0x0000
#define PORT_CABLE_STAT_OPEN 0x2000
#define PORT_CABLE_STAT_SHORT 0x4000
#define PORT_CABLE_STAT_FAILED 0x6000
#define PORT_START_CABLE_DIAG 0x1000
#define PORT_FORCE_LINK 0x0800
#define PORT_POWER_SAVING_DISABLE 0x0400
#define PORT_PHY_REMOTE_LOOPBACK 0x0200
#define PORT_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PORT_CTRL_4_OFFSET 0x12
#define PORT_LED_OFF 0x8000
#define PORT_TX_DISABLE 0x4000
#define PORT_AUTO_NEG_RESTART 0x2000
#define PORT_REMOTE_FAULT_DISABLE 0x1000
#define PORT_POWER_DOWN 0x0800
#define PORT_AUTO_MDIX_DISABLE 0x0400
#define PORT_FORCE_MDIX 0x0200
#define PORT_LOOPBACK 0x0100
#define PORT_AUTO_NEG_ENABLE 0x0080
#define PORT_FORCE_100_MBIT 0x0040
#define PORT_FORCE_FULL_DUPLEX 0x0020
#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
#define PORT_AUTO_NEG_100BTX_FD 0x0008
#define PORT_AUTO_NEG_100BTX 0x0004
#define PORT_AUTO_NEG_10BT_FD 0x0002
#define PORT_AUTO_NEG_10BT 0x0001
#define KS884X_PORT_STATUS_OFFSET 0x14
#define PORT_HP_MDIX 0x8000
#define PORT_REVERSED_POLARITY 0x2000
#define PORT_RX_FLOW_CTRL 0x0800
#define PORT_TX_FLOW_CTRL 0x1000
#define PORT_STATUS_SPEED_100MBIT 0x0400
#define PORT_STATUS_FULL_DUPLEX 0x0200
#define PORT_REMOTE_FAULT 0x0100
#define PORT_MDIX_STATUS 0x0080
#define PORT_AUTO_NEG_COMPLETE 0x0040
#define PORT_STATUS_LINK_GOOD 0x0020
#define PORT_REMOTE_SYM_PAUSE 0x0010
#define PORT_REMOTE_100BTX_FD 0x0008
#define PORT_REMOTE_100BTX 0x0004
#define PORT_REMOTE_10BT_FD 0x0002
#define PORT_REMOTE_10BT 0x0001
/*
#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
*/
#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
#define STATIC_MAC_TABLE_VALID 0x00080000
#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
#define STATIC_MAC_TABLE_USE_FID 0x00200000
#define STATIC_MAC_TABLE_FID 0x03C00000
#define STATIC_MAC_FWD_PORTS_SHIFT 16
#define STATIC_MAC_FID_SHIFT 22
/*
#define VLAN_TABLE_VID 00-00000000-00000FFF
#define VLAN_TABLE_FID 00-00000000-0000F000
#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
#define VLAN_TABLE_VALID 00-00000000-00080000
*/
#define VLAN_TABLE_VID 0x00000FFF
#define VLAN_TABLE_FID 0x0000F000
#define VLAN_TABLE_MEMBERSHIP 0x00070000
#define VLAN_TABLE_VALID 0x00080000
#define VLAN_TABLE_FID_SHIFT 12
#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
/*
#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
*/
#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
#define DYNAMIC_MAC_TABLE_FID 0x000F0000
#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
#define DYNAMIC_MAC_TABLE_RESERVED 0x78
#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
#define DYNAMIC_MAC_FID_SHIFT 16
#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
#define DYNAMIC_MAC_ENTRIES_SHIFT 24
#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
/*
#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
#define MIB_COUNTER_VALID 00-00000000-40000000
#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
*/
#define MIB_COUNTER_VALUE 0x3FFFFFFF
#define MIB_COUNTER_VALID 0x40000000
#define MIB_COUNTER_OVERFLOW 0x80000000
#define MIB_PACKET_DROPPED 0x0000FFFF
#define KS_MIB_PACKET_DROPPED_TX_0 0x100
#define KS_MIB_PACKET_DROPPED_TX_1 0x101
#define KS_MIB_PACKET_DROPPED_TX 0x102
#define KS_MIB_PACKET_DROPPED_RX_0 0x103
#define KS_MIB_PACKET_DROPPED_RX_1 0x104
#define KS_MIB_PACKET_DROPPED_RX 0x105
/* Change default LED mode. */
#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
#define MAX_ETHERNET_BODY_SIZE 1500
#define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
#define MAX_ETHERNET_PACKET_SIZE \
(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
#define MAX_RX_BUF_SIZE (1912 + 4)
#define ADDITIONAL_ENTRIES 16
#define MAX_MULTICAST_LIST 32
#define HW_MULTICAST_SIZE 8
#define HW_TO_DEV_PORT(port) (port - 1)
/* Link state of a port as tracked by the driver. */
enum {
	media_connected,
	media_disconnected
};
/* Indices of the driver-maintained OID error counters. */
enum {
	OID_COUNTER_UNKOWN,	/* sic: "UNKOWN" typo kept; renaming would
				 * require touching every reference */
	OID_COUNTER_FIRST,
	/* total transmit errors */
	OID_COUNTER_XMIT_ERROR,
	/* total receive errors */
	OID_COUNTER_RCV_ERROR,
	OID_COUNTER_LAST
};
/*
* Hardware descriptor definitions
*/
#define DESC_ALIGNMENT 16
#define BUFFER_ALIGNMENT 8
#define NUM_OF_RX_DESC 64
#define NUM_OF_TX_DESC 64
#define KS_DESC_RX_FRAME_LEN 0x000007FF
#define KS_DESC_RX_FRAME_TYPE 0x00008000
#define KS_DESC_RX_ERROR_CRC 0x00010000
#define KS_DESC_RX_ERROR_RUNT 0x00020000
#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
#define KS_DESC_RX_ERROR_PHY 0x00080000
#define KS884X_DESC_RX_PORT_MASK 0x00300000
#define KS_DESC_RX_MULTICAST 0x01000000
#define KS_DESC_RX_ERROR 0x02000000
#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
#define KS_DESC_RX_LAST 0x20000000
#define KS_DESC_RX_FIRST 0x40000000
#define KS_DESC_RX_ERROR_COND \
(KS_DESC_RX_ERROR_CRC | \
KS_DESC_RX_ERROR_RUNT | \
KS_DESC_RX_ERROR_PHY | \
KS_DESC_RX_ERROR_TOO_LONG)
#define KS_DESC_HW_OWNED 0x80000000
#define KS_DESC_BUF_SIZE 0x000007FF
#define KS884X_DESC_TX_PORT_MASK 0x00300000
#define KS_DESC_END_OF_RING 0x02000000
#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
#define KS_DESC_TX_LAST 0x20000000
#define KS_DESC_TX_FIRST 0x40000000
#define KS_DESC_TX_INTERRUPT 0x80000000
#define KS_DESC_PORT_SHIFT 20
#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
#define KS_DESC_TX_MASK \
(KS_DESC_TX_INTERRUPT | \
KS_DESC_TX_FIRST | \
KS_DESC_TX_LAST | \
KS_DESC_TX_CSUM_GEN_IP | \
KS_DESC_TX_CSUM_GEN_TCP | \
KS_DESC_TX_CSUM_GEN_UDP | \
KS_DESC_BUF_SIZE)
/*
 * struct ksz_desc_rx_stat - receive descriptor control/status word
 *
 * Bit-field image of the 32-bit RX descriptor status word.  The layout
 * mirrors the KS_DESC_RX_* mask definitions above and is declared in
 * both orders so the in-memory position of each field is identical
 * regardless of the compiler's bit-field endianness.
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 first_desc:1;
	u32 last_desc:1;
	u32 csum_err_ip:1;
	u32 csum_err_tcp:1;
	u32 csum_err_udp:1;
	u32 error:1;
	u32 multicast:1;
	u32 src_port:4;
	u32 err_phy:1;
	u32 err_too_long:1;
	u32 err_runt:1;
	u32 err_crc:1;
	u32 frame_type:1;
	u32 reserved1:4;
	u32 frame_len:11;
#else
	u32 frame_len:11;	/* KS_DESC_RX_FRAME_LEN */
	u32 reserved1:4;
	u32 frame_type:1;	/* KS_DESC_RX_FRAME_TYPE */
	u32 err_crc:1;		/* KS_DESC_RX_ERROR_CRC */
	u32 err_runt:1;		/* KS_DESC_RX_ERROR_RUNT */
	u32 err_too_long:1;	/* KS_DESC_RX_ERROR_TOO_LONG */
	u32 err_phy:1;		/* KS_DESC_RX_ERROR_PHY */
	u32 src_port:4;		/* KS884X_DESC_RX_PORT_MASK */
	u32 multicast:1;	/* KS_DESC_RX_MULTICAST */
	u32 error:1;		/* KS_DESC_RX_ERROR */
	u32 csum_err_udp:1;	/* KS_DESC_RX_ERROR_CSUM_UDP */
	u32 csum_err_tcp:1;	/* KS_DESC_RX_ERROR_CSUM_TCP */
	u32 csum_err_ip:1;	/* KS_DESC_RX_ERROR_CSUM_IP */
	u32 last_desc:1;	/* KS_DESC_RX_LAST */
	u32 first_desc:1;	/* KS_DESC_RX_FIRST */
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
#endif
};
/*
 * struct ksz_desc_tx_stat - transmit descriptor status word
 *
 * Only the ownership bit (KS_DESC_HW_OWNED) is meaningful on transmit;
 * the rest of the word is reserved.
 */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 reserved1:31;
#else
	u32 reserved1:31;
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
#endif
};
/*
 * struct ksz_desc_rx_buf - receive descriptor buffer/control word
 *
 * Mirrors KS_DESC_BUF_SIZE and KS_DESC_END_OF_RING above; declared in
 * both bit-field endian orders.
 */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 reserved4:6;
	u32 end_of_ring:1;
	u32 reserved3:14;
	u32 buf_size:11;
#else
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
	u32 reserved3:14;
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 reserved4:6;
#endif
};
/*
 * struct ksz_desc_tx_buf - transmit descriptor buffer/control word
 *
 * Mirrors the KS_DESC_TX_* / KS_DESC_BUF_SIZE mask definitions above;
 * declared in both bit-field endian orders.
 */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 intr:1;
	u32 first_seg:1;
	u32 last_seg:1;
	u32 csum_gen_ip:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_udp:1;
	u32 end_of_ring:1;
	u32 reserved4:1;
	u32 dest_port:4;
	u32 reserved3:9;
	u32 buf_size:11;
#else
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
	u32 reserved3:9;
	u32 dest_port:4;	/* KS884X_DESC_TX_PORT_MASK */
	u32 reserved4:1;
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 csum_gen_udp:1;	/* KS_DESC_TX_CSUM_GEN_UDP */
	u32 csum_gen_tcp:1;	/* KS_DESC_TX_CSUM_GEN_TCP */
	u32 csum_gen_ip:1;	/* KS_DESC_TX_CSUM_GEN_IP */
	u32 last_seg:1;		/* KS_DESC_TX_LAST */
	u32 first_seg:1;	/* KS_DESC_TX_FIRST */
	u32 intr:1;		/* KS_DESC_TX_INTERRUPT */
#endif
};
/* Descriptor status word, viewable as RX/TX bit-fields or raw u32. */
union desc_stat {
	struct ksz_desc_rx_stat rx;
	struct ksz_desc_tx_stat tx;
	u32 data;
};
/* Descriptor buffer word, viewable as RX/TX bit-fields or raw u32. */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};
/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @addr: Physical address of memory buffer.
 * @next: Pointer to next hardware descriptor.
 *
 * All four words are 32-bit; this is the in-memory layout shared with
 * the device's DMA engine.
 */
struct ksz_hw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 addr;
	u32 next;
};
/**
 * struct ksz_sw_desc - Software descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @buf_size: Current buffers size value in hardware descriptor.
 *
 * Software-side shadow of &struct ksz_hw_desc used to stage values
 * before they are written to the shared hardware descriptor.
 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;
};
/**
 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 * @skb: Associated socket buffer.
 * @dma: Associated physical DMA address.
 * @len: Actual length used.
 */
struct ksz_dma_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	int len;
};
/**
 * struct ksz_desc - Descriptor structure
 * @phw: Hardware descriptor pointer to uncached physical memory.
 * @sw: Cached memory to hold hardware descriptor values for
 * manipulation.
 * @dma_buf: Operating system dependent data structure to hold physical
 * memory buffer allocation information.
 */
struct ksz_desc {
	struct ksz_hw_desc *phw;
	struct ksz_sw_desc sw;
	struct ksz_dma_buf dma_buf;
};
#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
/**
 * struct ksz_desc_info - Descriptor information data structure
 * @ring: First descriptor in the ring.
 * @cur: Current descriptor being manipulated.
 * @ring_virt: First hardware descriptor in the ring.
 * @ring_phys: The physical address of the first descriptor of the ring.
 * @size: Size of hardware descriptor.
 * @alloc: Number of descriptors allocated.
 * @avail: Number of descriptors available for use.
 * @last: Index for last descriptor released to hardware.
 * @next: Index for next descriptor available for use.
 * @mask: Mask for index wrapping.
 */
struct ksz_desc_info {
	struct ksz_desc *ring;
	struct ksz_desc *cur;
	struct ksz_hw_desc *ring_virt;
	u32 ring_phys;
	int size;
	int alloc;
	int avail;
	int last;
	int next;
	int mask;		/* presumably alloc - 1 (power-of-2 ring) — verify at use sites */
};
/*
* KSZ8842 switch definitions
*/
/* Selectors for the KSZ8842 switch lookup tables. */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};
#define LEARNED_MAC_TABLE_ENTRIES 1024
#define STATIC_MAC_TABLE_ENTRIES 8
/**
* struct ksz_mac_table - Static MAC table data structure
* @mac_addr: MAC address to filter.
* @vid: VID value.
* @fid: FID value.
* @ports: Port membership.
* @override: Override setting.
* @use_fid: FID use setting.
* @valid: Valid setting indicating the entry is being used.
*/
struct ksz_mac_table {
u8 mac_addr[ETH_ALEN];
u16 vid;
u8 fid;
u8 ports;
u8 override:1;
u8 use_fid:1;
u8 valid:1;
};
#define VLAN_TABLE_ENTRIES 16
/**
* struct ksz_vlan_table - VLAN table data structure
* @vid: VID value.
* @fid: FID value.
* @member: Port membership.
*/
struct ksz_vlan_table {
u16 vid;
u8 fid;
u8 member;
};
#define DIFFSERV_ENTRIES 64
#define PRIO_802_1P_ENTRIES 8
#define PRIO_QUEUES 4
#define SWITCH_PORT_NUM 2
#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
#define HOST_MASK (1 << SWITCH_PORT_NUM)
#define PORT_MASK 7
#define MAIN_PORT 0
#define OTHER_PORT 1
#define HOST_PORT SWITCH_PORT_NUM
#define PORT_COUNTER_NUM 0x20
#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
#define MIB_COUNTER_RX_LO_PRIORITY 0x00
#define MIB_COUNTER_RX_HI_PRIORITY 0x01
#define MIB_COUNTER_RX_UNDERSIZE 0x02
#define MIB_COUNTER_RX_FRAGMENT 0x03
#define MIB_COUNTER_RX_OVERSIZE 0x04
#define MIB_COUNTER_RX_JABBER 0x05
#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
#define MIB_COUNTER_RX_CRC_ERR 0x07
#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
#define MIB_COUNTER_RX_CTRL_8808 0x09
#define MIB_COUNTER_RX_PAUSE 0x0A
#define MIB_COUNTER_RX_BROADCAST 0x0B
#define MIB_COUNTER_RX_MULTICAST 0x0C
#define MIB_COUNTER_RX_UNICAST 0x0D
#define MIB_COUNTER_RX_OCTET_64 0x0E
#define MIB_COUNTER_RX_OCTET_65_127 0x0F
#define MIB_COUNTER_RX_OCTET_128_255 0x10
#define MIB_COUNTER_RX_OCTET_256_511 0x11
#define MIB_COUNTER_RX_OCTET_512_1023 0x12
#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
#define MIB_COUNTER_TX_LO_PRIORITY 0x14
#define MIB_COUNTER_TX_HI_PRIORITY 0x15
#define MIB_COUNTER_TX_LATE_COLLISION 0x16
#define MIB_COUNTER_TX_PAUSE 0x17
#define MIB_COUNTER_TX_BROADCAST 0x18
#define MIB_COUNTER_TX_MULTICAST 0x19
#define MIB_COUNTER_TX_UNICAST 0x1A
#define MIB_COUNTER_TX_DEFERRED 0x1B
#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
/**
 * struct ksz_port_mib - Port MIB data structure
 * @cnt_ptr: Current pointer to MIB counter index.
 * @link_down: Indication the link has just gone down.
 * @state: Connection status of the port.
 * @mib_start: The starting counter index. Some ports do not start at 0.
 * @counter: 64-bit MIB counter value.
 * @dropped: Temporary buffer to remember last read packet dropped values.
 *
 * MIB counters needs to be read periodically so that counters do not get
 * overflowed and give incorrect values. A right balance is needed to
 * satisfy this condition and not waste too much CPU time.
 *
 * It is pointless to read MIB counters when the port is disconnected. The
 * @state provides the connection status so that MIB counters are read only
 * when the port is connected. The @link_down indicates the port is just
 * disconnected so that all MIB counters are read one last time to update the
 * information.
 */
struct ksz_port_mib {
	u8 cnt_ptr;
	u8 link_down;
	u8 state;
	u8 mib_start;
	u64 counter[TOTAL_PORT_COUNTER_NUM];
	u32 dropped[2];
};
/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid: VID value.
 * @member: Port membership.
 * @port_prio: Port priority.
 * @rx_rate: Receive priority rate.
 * @tx_rate: Transmit priority rate.
 * @stp_state: Current Spanning Tree Protocol state.
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};
/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table: MAC table entries information.
 * @vlan_table: VLAN table entries information.
 * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
 * (bit7 ~ bit2) field.
 * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
 * Tag priority field.
 * @br_addr: Bridge address. Used for STP.
 * @other_addr: Other MAC address. Used for multiple network device mode.
 * @broad_per: Broadcast storm percentage.
 * @member: Current port membership. Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];
	u8 br_addr[ETH_ALEN];
	u8 other_addr[ETH_ALEN];
	u8 broad_per;
	u8 member;
};
#define TX_RATE_UNIT 10000
/**
 * struct ksz_port_info - Port information data structure
 * @state: Connection status of the port.
 * @tx_rate: Transmit rate divided by 10000 to get Mbit.
 * @duplex: Duplex mode.
 * @advertised: Advertised auto-negotiation setting. Used to determine link.
 * @partner: Auto-negotiation partner setting. Used to determine link.
 * @port_id: Port index to access actual hardware register.
 * @pdev: Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};
#define MAX_TX_HELD_SIZE 52000
/* Hardware features and bug fixes. */
#define LINK_INT_WORKING (1 << 0)
#define SMALL_PACKET_TX_BUG (1 << 1)
#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
#define RX_HUGE_FRAME (1 << 4)
#define STP_SUPPORT (1 << 8)
/* Software overrides. */
#define PAUSE_FLOW_CTRL (1 << 0)
#define FAST_AGING (1 << 1)
/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io: Virtual address assigned.
 * @ksz_switch: Pointer to KSZ8842 switch.
 * @port_info: Port information.
 * @port_mib: Port MIB information.
 * @dev_count: Number of network devices this hardware supports.
 * @dst_ports: Destination ports in switch for transmission.
 * @id: Hardware ID. Used for display only.
 * @mib_cnt: Number of MIB counters this hardware has.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @tx_cfg: Cached transmit control settings.
 * @rx_cfg: Cached receive control settings.
 * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
 * @intr_blocked: Interrupt blocked.
 * @rx_desc_info: Receive descriptor information.
 * @tx_desc_info: Transmit descriptor information.
 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
 * @tx_size: Transmit data size. Used for TX optimization.
 * The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr: Permanent MAC address.
 * @override_addr: Overridden MAC address.
 * @address: Additional MAC address entries.
 * @addr_list_size: Additional MAC address list size.
 * @mac_override: Indication of MAC address overridden.
 * @promiscuous: Counter to keep track of promiscuous mode set.
 * @all_multi: Counter to keep track of all multicast mode set.
 * @multi_list: Multicast address entries.
 * @multi_bits: Cached multicast hash table settings.
 * @multi_list_size: Multicast address list size.
 * @enabled: Indication of hardware enabled.
 * @rx_stop: Indication of receive process stop.
 * @features: Hardware features to enable.
 * @overrides: Hardware features to override.
 * @parent: Pointer to parent, network device private structure.
 */
struct ksz_hw {
	void __iomem *io;
	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;
	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;
	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;
	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;
	u8 perm_addr[ETH_ALEN];
	u8 override_addr[ETH_ALEN];
	u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;
	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];
	uint features;
	uint overrides;
	void *parent;
};
/* PHY flow control modes. */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
/**
 * struct ksz_port - Virtual port data structure
 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
 * duplex, and 0 for auto, which normally results in full
 * duplex.
 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
 * 0 for auto, which normally results in 100 Mbit.
 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
 * force.
 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
 * control, and PHY_FLOW_CTRL for flow control.
 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
 * Mbit PHY.
 * @first_port: Index of first port this port supports.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @port_cnt: Number of ports this port supports.
 * @counter: Port statistics counter.
 * @hw: Pointer to hardware structure.
 * @linked: Pointer to port information linked to this port.
 */
struct ksz_port {
	u8 duplex;
	u8 speed;
	u8 force_link;
	u8 flow_ctrl;
	int first_port;
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];
	struct ksz_hw *hw;
	struct ksz_port_info *linked;
};
/**
 * struct ksz_timer_info - Timer information data structure
 * @timer: Kernel timer.
 * @cnt: Running timer counter.
 * @max: Number of times to run timer; -1 for infinity.
 * @period: Timer period in jiffies.
 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;
	int max;
	int period;
};
/**
 * struct ksz_shared_mem - OS dependent shared memory data structure
 * @dma_addr: Physical DMA address allocated.
 * @alloc_size: Allocation size.
 * @phys: Actual physical address used.
 * @alloc_virt: Virtual address allocated.
 * @virt: Actual virtual address used.
 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;
	uint alloc_size;
	uint phys;
	u8 *alloc_virt;
	u8 *virt;
};
/**
 * struct ksz_counter_info - OS dependent counter information data structure
 * @counter: Wait queue to wakeup after counters are read.
 * @time: Next time in jiffies to read counter.
 * @read: Indication of counters read in full or not.
 */
struct ksz_counter_info {
	wait_queue_head_t counter;
	unsigned long time;
	int read;
};
/**
 * struct dev_info - Network device information data structure
 * @dev: Pointer to network device.
 * @pdev: Pointer to PCI device.
 * @hw: Hardware structure.
 * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock to prevent hardware from accessing.
 * @lock: Mutex lock to prevent device from accessing.
 * @dev_rcv: Receive process function used.
 * @last_skb: Socket buffer allocated for descriptor rx fragments.
 * @skb_index: Buffer index for receiving fragments.
 * @skb_len: Buffer length for receiving fragments.
 * @mib_read: Workqueue to read MIB counters.
 * @mib_timer_info: Timer to read MIB counters.
 * @counter: Used for MIB reading.
 * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
 * the maximum is MAX_RX_BUF_SIZE.
 * @opened: Counter to keep track of device open.
 * @rx_tasklet: Receive processing tasklet.
 * @tx_tasklet: Transmit processing tasklet.
 * @wol_enable: Wake-on-LAN enable set by ethtool.
 * @wol_support: Wake-on-LAN support used by ethtool.
 * @pme_wait: Used for KSZ8841 power management.
 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;
	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;
	spinlock_t hwlock;
	struct mutex lock;
	int (*dev_rcv)(struct dev_info *);
	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;
	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];
	int mtu;
	int opened;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int wol_enable;
	int wol_support;
	unsigned long pme_wait;
};
/**
 * struct dev_priv - Network device private data structure
 * @adapter: Adapter device information.
 * @port: Port information.
 * @monitor_timer_info: Timer to monitor ports.
 * @proc_sem: Semaphore for proc accessing.
 * @id: Device ID.
 * @mii_if: MII interface information.
 * @advertising: Temporary variable to store advertised settings.
 * @msg_enable: The message flags controlling driver output.
 * @media_state: The connection status of the device.
 * @multicast: The all multicast state of the device.
 * @promiscuous: The promiscuous state of the device.
 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;
	struct semaphore proc_sem;
	int id;
	struct mii_if_info mii_if;
	u32 advertising;
	u32 msg_enable;
	int media_state;
	int multicast;
	int promiscuous;
};
/* Driver identification strings. */
#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
#define DRV_RELDATE "Feb 8, 2010"
static char version[] __devinitdata =
	"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
/* Fallback MAC address when none is programmed in the hardware. */
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
/*
 * Interrupt processing primary routines
 */
/* Acknowledge (clear) the given interrupt status bits. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}
/* Disable all hardware interrupts, remembering the current mask in
 * intr_blocked so they can be restored later.
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Program the interrupt enable register with the given bits. */
static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
{
	hw->intr_set = interrupt;
	writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Re-enable interrupts using the cached interrupt mask. */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}
/* Remove the given bit(s) from the cached interrupt mask only. */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
/* Turn off the given interrupts both in hardware and in the cached mask. */
static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;
	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr & ~interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw_dis_intr_bit(hw, interrupt);
}
/**
 * hw_turn_on_intr - turn on specified interrupts
 * @hw: The hardware instance.
 * @bit: The interrupt bits to be on.
 *
 * This routine turns on the specified interrupts in the interrupt mask so that
 * those interrupts will be enabled.
 */
static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
{
	hw->intr_mask |= bit;
	if (!hw->intr_blocked)
		hw_set_intr(hw, hw->intr_mask);
}
/* Turn on the given interrupts in hardware without changing the mask. */
static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;
	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr | interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Read the interrupt status, masked by the currently enabled bits. */
static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
{
	*status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
	*status = *status & hw->intr_set;
}
/* Restore interrupts previously saved by hw_block_intr(). */
static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
{
	if (interrupt)
		hw_ena_intr(hw);
}
/**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
 * This function blocks all interrupts of the hardware and returns the current
 * interrupt enable mask so that interrupts can be restored later.
 *
 * Return the current interrupt enable mask.
 */
static uint hw_block_intr(struct ksz_hw *hw)
{
	uint interrupt = 0;
	if (!hw->intr_blocked) {
		hw_dis_intr(hw);
		interrupt = hw->intr_blocked;
	}
	return interrupt;
}
/*
 * Hardware descriptor routines
 */
/* Hand a receive descriptor back to the hardware with the given status. */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}
/* Release a descriptor to the hardware.  The buffer word is written only
 * when it changed; the control word (with hw_owned set) is written last so
 * the hardware never sees a half-updated descriptor.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
/* Take the next receive descriptor from the ring and clear its RX bits. */
static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
{
	*desc = &info->ring[info->last];
	info->last++;
	info->last &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
}
/* Set the DMA buffer address of a receive descriptor. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the buffer size of a receive descriptor (cached copy only). */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}
/* Take the next transmit descriptor from the ring and clear its TX bits. */
static inline void get_tx_pkt(struct ksz_desc_info *info,
	struct ksz_desc **desc)
{
	*desc = &info->ring[info->next];
	info->next++;
	info->next &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
}
/* Set the DMA buffer address of a transmit descriptor. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the buffer size of a transmit descriptor (cached copy only). */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
/* Switch functions */
#define TABLE_READ 0x10
#define TABLE_SEL_SHIFT 2
/* Dummy register read to let a preceding write take effect. */
#define HW_DELAY(hw, reg) \
	do { \
		u16 dummy; \
		dummy = readw(hw->io + reg); \
	} while (0)
/**
 * sw_r_table - read 4 bytes of data from switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data: Buffer to store the read data.
 *
 * This routine reads 4 bytes of data from the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
{
	u16 ctrl_addr;
	uint interrupt;
	ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
	interrupt = hw_block_intr(hw);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	*data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_table_64 - write 8 bytes of data to the switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data_hi: The high part of data to be written (bit63 ~ bit32).
 * @data_lo: The low part of data to be written (bit31 ~ bit0).
 *
 * This routine writes 8 bytes of data to the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of written data.
 */
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
	u32 data_lo)
{
	u16 ctrl_addr;
	uint interrupt;
	ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
	interrupt = hw_block_intr(hw);
	/* Both data words must be staged before the IACR write triggers the
	 * table access.
	 */
	writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
	writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_sta_mac_table - write to the static MAC table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @mac_addr: The MAC address.
 * @ports: The port members.
 * @override: The flag to override the port receive/transmit settings.
 * @valid: The flag to indicate entry is valid.
 * @use_fid: The flag to indicate the FID is valid.
 * @fid: The FID value.
 *
 * This routine writes an entry of the static MAC table of the switch. It
 * calls sw_w_table_64() to write the data.
 */
static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
	u8 ports, int override, int valid, int use_fid, u8 fid)
{
	u32 data_hi;
	u32 data_lo;
	/* Pack the 6-byte MAC address into the 64-bit table entry. */
	data_lo = ((u32) mac_addr[2] << 24) |
		((u32) mac_addr[3] << 16) |
		((u32) mac_addr[4] << 8) | mac_addr[5];
	data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
	data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
	if (override)
		data_hi |= STATIC_MAC_TABLE_OVERRIDE;
	if (use_fid) {
		data_hi |= STATIC_MAC_TABLE_USE_FID;
		data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
	}
	if (valid)
		data_hi |= STATIC_MAC_TABLE_VALID;
	sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
}
/**
 * sw_r_vlan_table - read from the VLAN table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @vid: Buffer to store the VID.
 * @fid: Buffer to store the FID.
 * @member: Buffer to store the port membership.
 *
 * This function reads an entry of the VLAN table of the switch. It calls
 * sw_r_table() to get the data.
 *
 * Return 0 if the entry is valid; otherwise -1.
 */
static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
	u8 *member)
{
	u32 data;
	sw_r_table(hw, TABLE_VLAN, addr, &data);
	if (data & VLAN_TABLE_VALID) {
		*vid = (u16)(data & VLAN_TABLE_VID);
		*fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
		*member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
			VLAN_TABLE_MEMBERSHIP_SHIFT);
		return 0;
	}
	return -1;
}
/**
 * port_r_mib_cnt - read MIB counter
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the counter.
 * @cnt: Buffer to store the counter.
 *
 * This routine reads a MIB counter of the port and accumulates it into @cnt.
 * If the counter never becomes valid within the polling limit, @cnt is left
 * unchanged.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
{
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int timeout;
	ctrl_addr = addr + PORT_COUNTER_NUM * port;
	interrupt = hw_block_intr(hw);
	ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	/* Poll until the hardware marks the counter value valid. */
	for (timeout = 100; timeout > 0; timeout--) {
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
		if (data & MIB_COUNTER_VALID) {
			if (data & MIB_COUNTER_OVERFLOW)
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}
	hw_restore_intr(hw, interrupt);
}
/**
 * port_r_mib_pkt - read dropped packet counts
 * @hw: The hardware instance.
 * @port: The port index.
 * @last: Buffer holding the previously read raw dropped packet values
 * (receive first, then transmit); updated in place.
 * @cnt: Buffer to store the receive and transmit dropped packet counts.
 *
 * This routine reads the dropped packet counts of the port. The hardware
 * values wrap, so the delta against @last is accumulated into @cnt.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
{
	u32 cur;
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int index;
	index = KS_MIB_PACKET_DROPPED_RX_0 + port;
	do {
		interrupt = hw_block_intr(hw);
		ctrl_addr = (u16) index;
		ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
			<< 8);
		writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
		HW_DELAY(hw, KS884X_IACR_OFFSET);
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
		hw_restore_intr(hw, interrupt);
		data &= MIB_PACKET_DROPPED;
		cur = *last;
		if (data != cur) {
			*last = data;
			/* Handle hardware counter wrap-around. */
			if (data < cur)
				data += MIB_PACKET_DROPPED + 1;
			data -= cur;
			*cnt += data;
		}
		++last;
		++cnt;
		/* Move from the RX dropped counter to the TX one. */
		index -= KS_MIB_PACKET_DROPPED_TX -
			KS_MIB_PACKET_DROPPED_TX_0 + 1;
	} while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
}
/**
 * port_r_cnt - read MIB counters periodically
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to read the counters of the port periodically to avoid
 * counter overflow. The hardware should be acquired first before calling this
 * routine.
 *
 * Return non-zero when not all counters not read.
 */
static int port_r_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];
	if (mib->mib_start < PORT_COUNTER_NUM)
		while (mib->cnt_ptr < PORT_COUNTER_NUM) {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		}
	/* The extra dropped packet counters live past the regular range. */
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	mib->cnt_ptr = 0;
	return 0;
}
/**
 * port_init_cnt - initialize MIB counter values
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to initialize all counters to zero if the hardware
 * cannot do it after reset.
 */
static void port_init_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];
	mib->cnt_ptr = 0;
	/* Read (and thereby clear) the hardware counters first ... */
	if (mib->mib_start < PORT_COUNTER_NUM)
		do {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		} while (mib->cnt_ptr < PORT_COUNTER_NUM);
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	/* ... then discard the stale software totals. */
	memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	mib->cnt_ptr = 0;
}
/*
* Port functions
*/
/**
 * port_chk - check port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to check.
 *
 * This function tests whether all of the specified bits are set in the port
 * register.
 *
 * Return 0 if the bits are not set.
 */
static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
{
	u32 reg;
	u16 val;

	PORT_CTRL_ADDR(port, reg);
	reg += offset;
	val = readw(hw->io + reg);
	return (val & bits) == bits;
}
/**
 * port_cfg - set port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to set.
 * @set: The flag indicating whether the bits are to be set or not.
 *
 * This routine performs a read-modify-write on the port register, setting or
 * clearing the specified bits.
 */
static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
	int set)
{
	u32 reg;
	u16 val;

	PORT_CTRL_ADDR(port, reg);
	reg += offset;
	val = readw(hw->io + reg);
	val = set ? (val | bits) : (val & ~bits);
	writew(val, hw->io + reg);
}
/**
 * port_chk_shift - check port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the register.
 * @shift: Number of bits to shift.
 *
 * This function checks whether the specified port's bit is set in the
 * register after shifting the register value right by @shift.
 *
 * Return 0 if the port is not set.
 */
static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
{
	u16 mask = 1 << port;
	u16 val;

	val = readw(hw->io + addr);
	val >>= shift;
	return (val & mask) == mask;
}
/**
 * port_cfg_shift - set port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the register.
 * @shift: Number of bits to shift.
 * @set: The flag indicating whether the port is to be set or not.
 *
 * This routine sets or clears the specified port's bit in the register,
 * with the port bit field located @shift bits up in the register.
 */
static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
	int set)
{
	u16 mask = (u16)(1 << port) << shift;
	u16 val;

	val = readw(hw->io + addr);
	if (set)
		val |= mask;
	else
		val &= ~mask;
	writew(val, hw->io + addr);
}
/**
 * port_r8 - read byte from port register
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a byte from the port register.
 */
static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
{
	u32 addr;
	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readb(hw->io + addr);
}
/**
 * port_r16 - read word from port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a word from the port register.
 */
static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
{
	u32 addr;
	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readw(hw->io + addr);
}
/**
 * port_w16 - write word to port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Data to write.
 *
 * This routine writes a word to the port register.
 */
static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
{
	u32 addr;
	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	writew(data, hw->io + addr);
}
/**
 * sw_chk - check switch register bits
 * @hw: The hardware instance.
 * @addr: The address of the switch register.
 * @bits: The data bits to check.
 *
 * This function tests whether all of the specified bits are set in the
 * switch register.
 *
 * Return 0 if the bits are not set.
 */
static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
{
	u16 val = readw(hw->io + addr);

	return (val & bits) == bits;
}
/**
 * sw_cfg - set switch register bits
 * @hw: The hardware instance.
 * @addr: The address of the switch register.
 * @bits: The data bits to set.
 * @set: The flag indicating whether the bits are to be set or not.
 *
 * This function performs a read-modify-write on the switch register, setting
 * or clearing the specified bits.
 */
static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
{
	u16 val = readw(hw->io + addr);

	val = set ? (val | bits) : (val & ~bits);
	writew(val, hw->io + addr);
}
/* Bandwidth */
/* Enable or disable broadcast storm protection on a port. */
static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}
/* Check whether broadcast storm protection is enabled on a port. */
static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
}
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROTECTION_RATE 10
/* 148,800 frames * 67 ms / 100 */
#define BROADCAST_STORM_VALUE 9969
/**
 * sw_cfg_broad_storm - configure broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine programs the broadcast storm threshold of the switch,
 * clamping the computed rate to the hardware maximum.
 */
static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	u16 reg;
	u32 threshold = (u32) BROADCAST_STORM_VALUE * (u32) percent / 100;

	if (threshold > BROADCAST_STORM_RATE)
		threshold = BROADCAST_STORM_RATE;
	reg = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	reg &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
	/* The rate is stored byte-swapped in the register. */
	reg |= ((threshold & 0x00FF) << 8) | ((threshold & 0xFF00) >> 8);
	writew(reg, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
}
/**
 * sw_get_broad_storm - get broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Buffer to store the broadcast storm threshold percentage.
 *
 * This routine retrieves the broadcast storm threshold of the switch and
 * converts the raw (byte-swapped) hardware rate back into a percentage.
 */
static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
{
	int num;
	u16 data;
	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	/* Undo the byte-swapped register layout. */
	num = (data & BROADCAST_STORM_RATE_HI);
	num <<= 8;
	num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
	/* Round to the nearest percent. */
	num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
	*percent = (u8) num;
}
/**
 * sw_dis_broad_storm - disable broadstorm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the broadcast storm limit function of the switch.
 */
static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
{
	port_cfg_broad_storm(hw, port, 0);
}
/**
 * sw_ena_broad_storm - enable broadcast storm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine enables the broadcast storm limit function of the switch.
 */
static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
{
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	port_cfg_broad_storm(hw, port, 1);
}
/**
 * sw_init_broad_storm - initialize broadcast storm
 * @hw: The hardware instance.
 *
 * This routine initializes the broadcast storm limit function of the switch,
 * starting with a 1% threshold and protection disabled on every port.
 */
static void sw_init_broad_storm(struct ksz_hw *hw)
{
	int port;
	hw->ksz_switch->broad_per = 1;
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	for (port = 0; port < TOTAL_PORT_NUM; port++)
		sw_dis_broad_storm(hw, port);
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
}
/**
 * hw_cfg_broad_storm - configure broadcast storm
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine configures the broadcast storm threshold of the switch and
 * caches the value the hardware actually accepted.
 * It is called by user functions. The hardware should be acquired first.
 */
static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	u8 rate = percent > 100 ? 100 : percent;

	sw_cfg_broad_storm(hw, rate);
	/* Read back the effective (rounded) percentage. */
	sw_get_broad_storm(hw, &rate);
	hw->ksz_switch->broad_per = rate;
}
/**
 * sw_dis_prio_rate - disable switch priority rate
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the priority rate function of the switch.
 */
static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
{
	u32 addr;
	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_IN_RATE_OFFSET;
	writel(0, hw->io + addr);
}
/**
 * sw_init_prio_rate - initialize switch priority rate
 * @hw: The hardware instance.
 *
 * This routine initializes the priority rate function of the switch, clearing
 * the cached rates and disabling rate limiting on every port.
 */
static void sw_init_prio_rate(struct ksz_hw *hw)
{
	int port;
	int prio;
	struct ksz_switch *sw = hw->ksz_switch;
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		for (prio = 0; prio < PRIO_QUEUES; prio++) {
			sw->port_cfg[port].rx_rate[prio] =
				sw->port_cfg[port].tx_rate[prio] = 0;
		}
		sw_dis_prio_rate(hw, port);
	}
}
/* Communication */
/* Enable/disable half-duplex back pressure on port @p. */
static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}
/* Enable/disable forced (non-negotiated) flow control on port @p. */
static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
}
/* Return non-zero if back pressure is enabled on port @p. */
static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
}
/* Return non-zero if forced flow control is enabled on port @p. */
static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
}
/* Spanning Tree */
/* Disable (set=1) or enable (set=0) MAC address learning on port @p. */
static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
}
/* Enable/disable packet reception on port @p. */
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
}
/* Enable/disable packet transmission on port @p. */
static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
}
/* Enable/disable fast aging of the dynamic MAC table. */
static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
{
sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
}
/*
 * Flush the dynamic MAC table by briefly pulsing fast aging.  Skipped when
 * the FAST_AGING override already keeps fast aging permanently on.
 */
static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
{
if (!(hw->overrides & FAST_AGING)) {
sw_cfg_fast_aging(hw, 1);
/* Give the hardware time to age out all dynamic entries. */
mdelay(1);
sw_cfg_fast_aging(hw, 0);
}
}
/* VLAN */
/* Enable/disable VLAN tag insertion on egress for port @p. */
static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
}
/* Enable/disable VLAN tag removal on egress for port @p. */
static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
}
/* Return non-zero if tag insertion is enabled on port @p. */
static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
}
/* Return non-zero if tag removal is enabled on port @p. */
static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
}
/* Enable/disable discarding of packets whose VID is not in the VLAN table. */
static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
}
/* Enable/disable ingress VLAN filtering on port @p. */
static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
}
/* Return non-zero if non-VID discarding is enabled on port @p. */
static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
}
/* Return non-zero if ingress VLAN filtering is enabled on port @p. */
static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
}
/* Mirroring */
/* Make port @p the sniffer (monitor) port when @set is non-zero. */
static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
}
/* Mirror packets received on port @p to the sniffer port. */
static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
}
/* Mirror packets transmitted on port @p to the sniffer port. */
static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
}
/* Require both rx and tx match before mirroring when @set is non-zero. */
static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
{
sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
}
/* Turn off all port mirroring functions of the switch. */
static void sw_init_mirror(struct ksz_hw *hw)
{
int port;
for (port = 0; port < TOTAL_PORT_NUM; port++) {
port_cfg_mirror_sniffer(hw, port, 0);
port_cfg_mirror_rx(hw, port, 0);
port_cfg_mirror_tx(hw, port, 0);
}
sw_cfg_mirror_rx_tx(hw, 0);
}
/* Enable/disable forwarding of unknown unicast packets to the default port. */
static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
{
sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
SWITCH_UNK_DEF_PORT_ENABLE, set);
}
/* Return non-zero if unknown unicast forwarding is enabled. */
static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
{
return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
SWITCH_UNK_DEF_PORT_ENABLE);
}
/* Include/exclude @port in the unknown unicast destination port map. */
static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
{
port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
}
/* Return non-zero if @port is in the unknown unicast destination port map. */
static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
{
return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
}
/* Priority */
/* Enable/disable DiffServ (TOS) based priority classification on port @p. */
static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
}
/* Enable/disable 802.1p tag based priority classification on port @p. */
static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
}
/* Enable/disable user priority ceiling (802.1p re-mapping) on port @p. */
static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
}
/* Enable/disable multiple transmit priority queues on port @p. */
static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}
/* Return non-zero if DiffServ priority is enabled on port @p. */
static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
}
/* Return non-zero if 802.1p priority is enabled on port @p. */
static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
}
/* Return non-zero if the user priority ceiling is enabled on port @p. */
static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
}
/* Return non-zero if multiple priority queues are enabled on port @p. */
static inline int port_chk_prio(struct ksz_hw *hw, int p)
{
return port_chk(hw, p,
KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
}
/**
 * sw_dis_diffserv - disable switch DiffServ priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the DiffServ priority function of the switch.
 */
static void sw_dis_diffserv(struct ksz_hw *hw, int port)
{
port_cfg_diffserv(hw, port, 0);
}
/**
 * sw_dis_802_1p - disable switch 802.1p priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the 802.1p priority function of the switch.
 */
static void sw_dis_802_1p(struct ksz_hw *hw, int port)
{
port_cfg_802_1p(hw, port, 0);
}
/**
 * sw_cfg_replace_null_vid - configure null VID replacement
 * @hw: The hardware instance.
 * @set: The flag to disable or enable.
 *
 * This routine toggles the SWITCH_REPLACE_NULL_VID control bit.
 * NOTE(review): presumably this makes the switch replace a null VID in a
 * priority-tagged frame with the port default VID -- confirm against the
 * KSZ8842 datasheet description of switch control register 3.
 */
static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
{
sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
}
/**
 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
 * @hw: The hardware instance.
 * @port: The port index.
 * @set: The flag to disable or enable.
 *
 * This routine enables the 802.1p priority re-mapping function of the switch.
 * That allows 802.1p priority field to be replaced with the port's default
 * tag's priority value if the ingress packet's 802.1p priority has a higher
 * priority than port's default tag's priority.
 */
static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
{
port_cfg_replace_vid(hw, port, set);
}
/**
 * sw_cfg_port_based - configure switch port based priority
 * @hw: The hardware instance.
 * @port: The port index.
 * @prio: The priority to set.
 *
 * This routine configures the port based priority of the switch.
 * The priority is clamped, cached in the software shadow, and written into
 * the priority field of the port control 1 register.
 */
static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
{
u16 data;
/* Clamp to the highest priority the hardware supports. */
if (prio > PORT_BASED_PRIORITY_BASE)
prio = PORT_BASED_PRIORITY_BASE;
hw->ksz_switch->port_cfg[port].port_prio = prio;
/* Read-modify-write only the priority field of the register. */
port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
data &= ~PORT_BASED_PRIORITY_MASK;
data |= prio << PORT_BASED_PRIORITY_SHIFT;
port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
}
/**
 * sw_dis_multi_queue - disable transmit multiple queues
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the transmit multiple queues selection of the switch
 * port. Only single transmit queue on the port.
 */
static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
{
port_cfg_prio(hw, port, 0);
}
/**
 * sw_init_prio - initialize switch priority
 * @hw: The hardware instance.
 *
 * This routine initializes the switch QoS priority functions.  The 802.1p
 * and DiffServ mapping tables are filled with their defaults and every
 * per-port QoS feature is switched off.
 */
static void sw_init_prio(struct ksz_hw *hw)
{
	struct ksz_switch *sw = hw->ksz_switch;
	int p;
	int tos;

	/*
	 * Map the eight 802.1p tag priorities pairwise onto the priority
	 * queues: tags 0-1 -> queue 0, 2-3 -> 1, 4-5 -> 2, 6-7 -> 3.
	 */
	for (tos = 0; tos < 8; tos++)
		sw->p_802_1p[tos] = tos / 2;

	/* Every DiffServ codepoint starts out in priority queue 0. */
	for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
		sw->diffserv[tos] = 0;

	/* Disable all QoS functions on every port. */
	for (p = 0; p < TOTAL_PORT_NUM; p++) {
		sw_dis_multi_queue(hw, p);
		sw_dis_diffserv(hw, p);
		sw_dis_802_1p(hw, p);
		sw_cfg_replace_vid(hw, p, 0);
		sw->port_cfg[p].port_prio = 0;
		sw_cfg_port_based(hw, p, sw->port_cfg[p].port_prio);
	}
	sw_cfg_replace_null_vid(hw, 0);
}
/**
 * port_get_def_vid - get port default VID.
 * @hw: The hardware instance.
 * @port: The port index.
 * @vid: Buffer to store the VID.
 *
 * This routine retrieves the default VID of the port.
 */
static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
{
u32 addr;
PORT_CTRL_ADDR(port, addr);
addr += KS8842_PORT_CTRL_VID_OFFSET;
*vid = readw(hw->io + addr);
}
/**
 * sw_init_vlan - initialize switch VLAN
 * @hw: The hardware instance.
 *
 * This routine initializes the VLAN function of the switch by mirroring
 * the hardware VLAN table and per-port default VIDs into the software
 * shadow, and resetting every port's membership to all ports.
 */
static void sw_init_vlan(struct ksz_hw *hw)
{
int port;
int entry;
struct ksz_switch *sw = hw->ksz_switch;
/* Read 16 VLAN entries from device's VLAN table. */
for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
sw_r_vlan_table(hw, entry,
&sw->vlan_table[entry].vid,
&sw->vlan_table[entry].fid,
&sw->vlan_table[entry].member);
}
for (port = 0; port < TOTAL_PORT_NUM; port++) {
port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
/* Default: every port is a member of the port-based VLAN. */
sw->port_cfg[port].member = PORT_MASK;
}
}
/**
 * sw_cfg_port_base_vlan - configure port-based VLAN membership
 * @hw: The hardware instance.
 * @port: The port index.
 * @member: The port-based VLAN membership.
 *
 * This routine configures the port-based VLAN membership of the port.
 */
static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
{
u32 addr;
u8 data;
PORT_CTRL_ADDR(port, addr);
addr += KS8842_PORT_CTRL_2_OFFSET;
/* Read-modify-write only the membership bits of the register. */
data = readb(hw->io + addr);
data &= ~PORT_VLAN_MEMBERSHIP;
data |= (member & PORT_MASK);
writeb(data, hw->io + addr);
/* Keep the software shadow in sync with the hardware. */
hw->ksz_switch->port_cfg[port].member = member;
}
/**
 * sw_get_addr - get the switch MAC address.
 * @hw: The hardware instance.
 * @mac_addr: Buffer to store the MAC address.
 *
 * This function retrieves the MAC address of the switch.
 *
 * NOTE(review): bytes are read pairwise from the two MAC address register
 * banks (offset 0 and offset 1), so the byte layout depends on the
 * hardware register map -- confirm against the KSZ8842 datasheet before
 * changing this access pattern.
 */
static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
{
int i;
for (i = 0; i < 6; i += 2) {
mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
}
}
/**
 * sw_set_addr - configure switch MAC address
 * @hw: The hardware instance.
 * @mac_addr: The MAC address.
 *
 * This function configures the MAC address of the switch.
 * Bytes are written pairwise, mirroring the read pattern in sw_get_addr.
 */
static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
{
int i;
for (i = 0; i < 6; i += 2) {
writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
}
}
/**
 * sw_set_global_ctrl - set switch global control
 * @hw: The hardware instance.
 *
 * This routine sets the global control of the switch function:
 * MII flow control, back off behavior, aging, and collision handling.
 */
static void sw_set_global_ctrl(struct ksz_hw *hw)
{
u16 data;
/* Enable switch MII flow control. */
data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
data |= SWITCH_FLOW_CTRL;
writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
/* Enable aggressive back off algorithm in half duplex mode. */
data |= SWITCH_AGGR_BACKOFF;
/* Enable automatic fast aging when link changed detected. */
data |= SWITCH_AGING_ENABLE;
data |= SWITCH_LINK_AUTO_AGING;
/* Fast aging is kept permanently on only with the FAST_AGING override. */
if (hw->overrides & FAST_AGING)
data |= SWITCH_FAST_AGING;
else
data &= ~SWITCH_FAST_AGING;
writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
/* Enable no excessive collision drop. */
data |= NO_EXC_COLLISION_DROP;
writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
}
/* Port spanning tree states as programmed by port_set_stp_state(). */
enum {
STP_STATE_DISABLED = 0,
STP_STATE_LISTENING,
STP_STATE_LEARNING,
STP_STATE_FORWARDING,
STP_STATE_BLOCKED,
STP_STATE_SIMPLE
};
/**
 * port_set_stp_state - configure port spanning tree state
 * @hw: The hardware instance.
 * @port: The port index.
 * @state: The spanning tree state (one of the STP_STATE_* values).
 *
 * This routine configures the spanning tree state of the port by adjusting
 * the rx/tx enable and learning disable bits of port control register 2,
 * then records the state in the software shadow.
 */
static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
{
u16 data;
port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
switch (state) {
case STP_STATE_DISABLED:
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
data |= PORT_LEARN_DISABLE;
break;
case STP_STATE_LISTENING:
/*
* No need to turn on transmit because of port direct mode.
* Turning on receive is required if static MAC table is not setup.
*/
data &= ~PORT_TX_ENABLE;
data |= PORT_RX_ENABLE;
data |= PORT_LEARN_DISABLE;
break;
case STP_STATE_LEARNING:
data &= ~PORT_TX_ENABLE;
data |= PORT_RX_ENABLE;
data &= ~PORT_LEARN_DISABLE;
break;
case STP_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
data &= ~PORT_LEARN_DISABLE;
break;
case STP_STATE_BLOCKED:
/*
* Need to setup static MAC table with override to keep receiving BPDU
* messages. See sw_init_stp routine.
*/
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
data |= PORT_LEARN_DISABLE;
break;
case STP_STATE_SIMPLE:
/* Forwarding without learning (simple switch mode). */
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
data |= PORT_LEARN_DISABLE;
break;
}
port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
hw->ksz_switch->port_cfg[port].stp_state = state;
}
/* Reserved static MAC table entry indices. */
#define STP_ENTRY 0
#define BROADCAST_ENTRY 1
#define BRIDGE_ADDR_ENTRY 2
#define IPV6_ADDR_ENTRY 3
/**
 * sw_clr_sta_mac_table - clear static MAC table
 * @hw: The hardware instance.
 *
 * This routine clears the static MAC table by rewriting every entry with
 * its valid flag forced to 0; the shadow entries themselves are preserved.
 */
static void sw_clr_sta_mac_table(struct ksz_hw *hw)
{
struct ksz_mac_table *entry;
int i;
for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
entry = &hw->ksz_switch->mac_table[i];
sw_w_sta_mac_table(hw, i,
entry->mac_addr, entry->ports,
entry->override, 0,
entry->use_fid, entry->fid);
}
}
/**
 * sw_init_stp - initialize switch spanning tree support
 * @hw: The hardware instance.
 *
 * This routine initializes the spanning tree support of the switch by
 * installing a static MAC table entry for the STP multicast address
 * 01:80:C2:00:00:00 so BPDUs always reach the host, even on blocked ports.
 */
static void sw_init_stp(struct ksz_hw *hw)
{
struct ksz_mac_table *entry;
entry = &hw->ksz_switch->mac_table[STP_ENTRY];
/* IEEE 802.1D bridge group address. */
entry->mac_addr[0] = 0x01;
entry->mac_addr[1] = 0x80;
entry->mac_addr[2] = 0xC2;
entry->mac_addr[3] = 0x00;
entry->mac_addr[4] = 0x00;
entry->mac_addr[5] = 0x00;
entry->ports = HOST_MASK;
/* Override lets BPDUs through regardless of the port STP state. */
entry->override = 1;
entry->valid = 1;
sw_w_sta_mac_table(hw, STP_ENTRY,
entry->mac_addr, entry->ports,
entry->override, entry->valid,
entry->use_fid, entry->fid);
}
/**
 * sw_block_addr - block certain packets from the host port
 * @hw: The hardware instance.
 *
 * This routine blocks certain packets from reaching to the host port by
 * invalidating the broadcast, bridge address, and IPv6 address static MAC
 * table entries.
 */
static void sw_block_addr(struct ksz_hw *hw)
{
struct ksz_mac_table *entry;
int i;
for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
entry = &hw->ksz_switch->mac_table[i];
entry->valid = 0;
sw_w_sta_mac_table(hw, i,
entry->mac_addr, entry->ports,
entry->override, entry->valid,
entry->use_fid, entry->fid);
}
}
/* All link capabilities the PHY can advertise during auto-negotiation. */
#define PHY_LINK_SUPPORT \
(PHY_AUTO_NEG_ASYM_PAUSE | \
PHY_AUTO_NEG_SYM_PAUSE | \
PHY_AUTO_NEG_100BT4 | \
PHY_AUTO_NEG_100BTX_FD | \
PHY_AUTO_NEG_100BTX | \
PHY_AUTO_NEG_10BT_FD | \
PHY_AUTO_NEG_10BT)
/*
 * Memory-mapped PHY register accessors.  @phy is the byte offset of the
 * PHY's register bank inside the chip's I/O space.
 */
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
}
static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}
static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
{
writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}
static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
}
static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
{
writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}
static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}
static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
{
writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}
static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
{
writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
/**
 * hw_r_phy - read data from PHY register
 * @hw: The hardware instance.
 * @port: Port to read.
 * @reg: PHY register to read.
 * @val: Buffer to store the read data.
 *
 * This routine reads data from the PHY register.
 */
static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
{
int phy;
/* PHY register banks for the ports are laid out at fixed intervals. */
phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
*val = readw(hw->io + phy);
}
/**
 * hw_w_phy - write data to PHY register
 * @hw: The hardware instance.
 * @port: Port to write.
 * @reg: PHY register to write.
 * @val: Word data to write.
 *
 * This routine writes data to the PHY register.
 */
static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
{
int phy;
/* Same register layout as hw_r_phy. */
phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
writew(val, hw->io + phy);
}
/*
 * EEPROM access functions
 */
/* AT93C46 serial EEPROM opcodes and extended-command address codes. */
#define AT93C_CODE 0
#define AT93C_WR_OFF 0x00
#define AT93C_WR_ALL 0x10
#define AT93C_ER_ALL 0x20
#define AT93C_WR_ON 0x30
#define AT93C_WRITE 1
#define AT93C_READ 2
#define AT93C_ERASE 3
/* Microseconds to hold each half of the bit-banged clock cycle. */
#define EEPROM_DELAY 4
/* Clear @gpio bit(s) in the EEPROM control register. */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
u16 data;
data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
data &= ~gpio;
writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
/* Set @gpio bit(s) in the EEPROM control register. */
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
u16 data;
data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
data |= gpio;
writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}
/* Return the current state of @gpio bit(s) (non-zero if any is set). */
static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
u16 data;
data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
return (u8)(data & gpio);
}
/* Issue one full bit-banged clock cycle to the serial EEPROM. */
static void eeprom_clk(struct ksz_hw *hw)
{
raise_gpio(hw, EEPROM_SERIAL_CLOCK);
udelay(EEPROM_DELAY);
drop_gpio(hw, EEPROM_SERIAL_CLOCK);
udelay(EEPROM_DELAY);
}
/* Clock a 16-bit word in from the EEPROM, MSB first. */
static u16 spi_r(struct ksz_hw *hw)
{
int i;
u16 temp = 0;
for (i = 15; i >= 0; i--) {
raise_gpio(hw, EEPROM_SERIAL_CLOCK);
udelay(EEPROM_DELAY);
/* Sample the data-in pin while the clock is high. */
temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
drop_gpio(hw, EEPROM_SERIAL_CLOCK);
udelay(EEPROM_DELAY);
}
return temp;
}
/* Clock a 16-bit word out to the EEPROM, MSB first. */
static void spi_w(struct ksz_hw *hw, u16 data)
{
	int bit;

	for (bit = 15; bit >= 0; bit--) {
		/* Present the data bit, then pulse the clock. */
		if (data & (1 << bit))
			raise_gpio(hw, EEPROM_DATA_OUT);
		else
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}
/*
 * Send an AT93C46 command frame: start bit, 2-bit opcode @data, then the
 * 6-bit address @reg, MSB first.
 */
static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
{
int i;
/* Initial start bit */
raise_gpio(hw, EEPROM_DATA_OUT);
eeprom_clk(hw);
/* AT93C operation */
for (i = 1; i >= 0; i--) {
(data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
drop_gpio(hw, EEPROM_DATA_OUT);
eeprom_clk(hw);
}
/* Address location */
for (i = 5; i >= 0; i--) {
(reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
drop_gpio(hw, EEPROM_DATA_OUT);
eeprom_clk(hw);
}
}
/* Word offsets of well-known fields inside the AT93C46 EEPROM. */
#define EEPROM_DATA_RESERVED 0
#define EEPROM_DATA_MAC_ADDR_0 1
#define EEPROM_DATA_MAC_ADDR_1 2
#define EEPROM_DATA_MAC_ADDR_2 3
#define EEPROM_DATA_SUBSYS_ID 4
#define EEPROM_DATA_SUBSYS_VEN_ID 5
#define EEPROM_DATA_PM_CAP 6
/* User defined EEPROM data */
#define EEPROM_DATA_OTHER_MAC_ADDR 9
/**
 * eeprom_read - read from AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 *
 * This function reads a word from the AT93C46 EEPROM.
 *
 * Return the data value.
 */
static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
{
u16 data;
/* Select the chip, issue a READ command, and clock the word in. */
raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
spi_reg(hw, AT93C_READ, reg);
data = spi_r(hw);
drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
return data;
}
/**
 * eeprom_write - write to AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 * @data: The data value.
 *
 * This procedure writes a word to the AT93C46 EEPROM.  The sequence is:
 * enable writes, erase the word, write the word, disable writes -- with a
 * busy poll (ready/busy on the data-in pin) after erase and write.
 */
static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
{
int timeout;
raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
/* Enable write. */
spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Erase the register. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
spi_reg(hw, AT93C_ERASE, reg);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Check operation complete. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
timeout = 8;
mdelay(2);
do {
mdelay(1);
} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Write the register. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
spi_reg(hw, AT93C_WRITE, reg);
spi_w(hw, data);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Check operation complete. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
timeout = 8;
mdelay(2);
do {
mdelay(1);
} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Disable write. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
}
/*
 * Link detection routines
 */

/*
 * advertised_flow_ctrl - adjust the advertised pause capability bits
 *
 * Clears the symmetric-pause advertisement in @ctrl and sets it back only
 * when the port's flow control mode is PHY_FLOW_CTRL.  The asymmetric
 * modes PHY_TX_ONLY and PHY_RX_ONLY are not supported and leave the bit
 * cleared.  Returns the adjusted control word.
 */
static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
{
	ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
	if (PHY_FLOW_CTRL == port->flow_ctrl)
		ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
	return ctrl;
}
/*
 * Update the DMA rx/tx flow control enable bits.  The hardware registers
 * are only touched when the device is enabled and a bit actually changed.
 */
static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
{
u32 rx_cfg;
u32 tx_cfg;
/* Remember the old values so unchanged registers are not rewritten. */
rx_cfg = hw->rx_cfg;
tx_cfg = hw->tx_cfg;
if (rx)
hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
else
hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
if (tx)
hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
else
hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
if (hw->enabled) {
if (rx_cfg != hw->rx_cfg)
writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
if (tx_cfg != hw->tx_cfg)
writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
}
}
/*
 * Resolve the rx/tx flow control settings from the local and remote pause
 * advertisements (IEEE 802.3 pause resolution), unless flow control is
 * forced by the PAUSE_FLOW_CTRL override.  A forced link enables both
 * directions unconditionally.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
u16 local, u16 remote)
{
int rx;
int tx;
/* Flow control is forced on; nothing to negotiate. */
if (hw->overrides & PAUSE_FLOW_CTRL)
return;
rx = tx = 0;
if (port->force_link)
rx = tx = 1;
if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
if (local & PHY_AUTO_NEG_SYM_PAUSE) {
rx = tx = 1;
} else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
(local & PHY_AUTO_NEG_PAUSE) ==
PHY_AUTO_NEG_ASYM_PAUSE) {
tx = 1;
}
} else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
rx = 1;
}
/* With the switch present the port DMA flow control stays untouched. */
if (!hw->ksz_switch)
set_flow_ctrl(hw, rx, tx);
}
/*
 * Work around the half-duplex signal bug: on affected hardware transmit
 * flow control must be turned off when the port runs half duplex
 * (info->duplex == 1), unless flow control is forced by override.
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
struct ksz_port_info *info, u16 link_status)
{
if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
!(hw->overrides & PAUSE_FLOW_CTRL)) {
u32 cfg = hw->tx_cfg;
/* Disable flow control in the half duplex mode. */
if (1 == info->duplex)
hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
/* Only write the register when the setting actually changed. */
if (hw->enabled && cfg != hw->tx_cfg)
writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
}
}
/**
 * port_get_link_speed - get current link status
 * @port: The port instance.
 *
 * This routine reads PHY registers to determine the current link status of the
 * switch ports.  For each port it updates the cached advertised/partner
 * values, speed, duplex, and media state, and re-resolves flow control on a
 * link-up transition.  Interrupts are blocked for the duration of the scan.
 */
static void port_get_link_speed(struct ksz_port *port)
{
uint interrupt;
struct ksz_port_info *info;
struct ksz_port_info *linked = NULL;
struct ksz_hw *hw = port->hw;
u16 data;
u16 status;
u8 local;
u8 remote;
int i;
int p;
int change = 0;
interrupt = hw_block_intr(hw);
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
info = &hw->port_info[p];
port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
/*
* Link status is changing all the time even when there is no
* cable connection!
*/
remote = status & (PORT_AUTO_NEG_COMPLETE |
PORT_STATUS_LINK_GOOD);
local = (u8) data;
/* No change to status. */
if (local == info->advertised && remote == info->partner)
continue;
info->advertised = local;
info->partner = remote;
if (status & PORT_STATUS_LINK_GOOD) {
/* Remember the first linked port. */
if (!linked)
linked = info;
info->tx_rate = 10 * TX_RATE_UNIT;
if (status & PORT_STATUS_SPEED_100MBIT)
info->tx_rate = 100 * TX_RATE_UNIT;
/* duplex: 1 = half, 2 = full. */
info->duplex = 1;
if (status & PORT_STATUS_FULL_DUPLEX)
info->duplex = 2;
if (media_connected != info->state) {
/* Link just came up: re-resolve pause settings. */
hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
&data);
hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
&status);
determine_flow_ctrl(hw, port, data, status);
if (hw->ksz_switch) {
port_cfg_back_pressure(hw, p,
(1 == info->duplex));
}
change |= 1 << i;
port_cfg_change(hw, port, info, status);
}
info->state = media_connected;
} else {
if (media_disconnected != info->state) {
change |= 1 << i;
/* Indicate the link just goes down. */
hw->port_mib[p].link_down = 1;
}
info->state = media_disconnected;
}
hw->port_mib[p].state = (u8) info->state;
}
/* Switch the reported link to the first port that still has one. */
if (linked && media_disconnected == port->linked->state)
port->linked = linked;
hw_restore_intr(hw, interrupt);
}
#define PHY_RESET_TIMEOUT 10
/**
 * port_set_link_speed - set port speed
 * @port: The port instance.
 *
 * This routine sets the link speed of the switch ports via auto-negotiation
 * advertisement.  A user-specified speed/duplex narrows the advertised
 * capabilities; auto-negotiation is restarted only when the advertisement
 * actually changed on a linked port.
 */
static void port_set_link_speed(struct ksz_port *port)
{
struct ksz_port_info *info;
struct ksz_hw *hw = port->hw;
u16 data;
u16 cfg;
u8 status;
int i;
int p;
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
info = &hw->port_info[p];
port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
/* Remember the current setting only when the link is up. */
cfg = 0;
if (status & PORT_STATUS_LINK_GOOD)
cfg = data;
data |= PORT_AUTO_NEG_ENABLE;
data = advertised_flow_ctrl(port, data);
/* Start from advertising all speed/duplex combinations. */
data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
/* Check if manual configuration is specified by the user. */
if (port->speed || port->duplex) {
if (10 == port->speed)
data &= ~(PORT_AUTO_NEG_100BTX_FD |
PORT_AUTO_NEG_100BTX);
else if (100 == port->speed)
data &= ~(PORT_AUTO_NEG_10BT_FD |
PORT_AUTO_NEG_10BT);
if (1 == port->duplex)
data &= ~(PORT_AUTO_NEG_100BTX_FD |
PORT_AUTO_NEG_10BT_FD);
else if (2 == port->duplex)
data &= ~(PORT_AUTO_NEG_100BTX |
PORT_AUTO_NEG_10BT);
}
/* Restart auto-negotiation only when something changed. */
if (data != cfg) {
data |= PORT_AUTO_NEG_RESTART;
port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
}
}
}
/**
 * port_force_link_speed - force port speed
 * @port: The port instance.
 *
 * This routine forces the link speed of the switch ports by disabling
 * auto-negotiation and writing the speed/duplex bits directly into the
 * PHY control register.
 */
static void port_force_link_speed(struct ksz_port *port)
{
struct ksz_hw *hw = port->hw;
u16 data;
int i;
int phy;
int p;
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
hw_r_phy_ctrl(hw, phy, &data);
data &= ~PHY_AUTO_NEG_ENABLE;
if (10 == port->speed)
data &= ~PHY_SPEED_100MBIT;
else if (100 == port->speed)
data |= PHY_SPEED_100MBIT;
/* port->duplex: 1 = half, 2 = full, 0 = leave unchanged. */
if (1 == port->duplex)
data &= ~PHY_FULL_DUPLEX;
else if (2 == port->duplex)
data |= PHY_FULL_DUPLEX;
hw_w_phy_ctrl(hw, phy, data);
}
}
/* Power the PHY of each port in the group down (@enable) or up. */
static void port_set_power_saving(struct ksz_port *port, int enable)
{
struct ksz_hw *hw = port->hw;
int i;
int p;
for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
port_cfg(hw, p,
KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
}
/*
 * KSZ8841 power management functions
 */
/**
 * hw_chk_wol_pme_status - check PMEN pin
 * @hw: The hardware instance.
 *
 * This function is used to check PMEN pin is asserted.
 *
 * Return 1 if PMEN pin is asserted; otherwise, 0.
 */
static int hw_chk_wol_pme_status(struct ksz_hw *hw)
{
struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
struct pci_dev *pdev = hw_priv->pdev;
u16 data;
/* Without a PCI PM capability there is no PME status to read. */
if (!pdev->pm_cap)
return 0;
pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
}
/**
 * hw_clr_wol_pme_status - clear PMEN pin
 * @hw: The hardware instance.
 *
 * This routine is used to clear PME_Status to deassert PMEN pin.
 */
static void hw_clr_wol_pme_status(struct ksz_hw *hw)
{
struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
struct pci_dev *pdev = hw_priv->pdev;
u16 data;
if (!pdev->pm_cap)
return;
/* Clear PME_Status to deassert PMEN pin. */
/* PME_Status is write-1-to-clear per the PCI PM specification. */
pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
data |= PCI_PM_CTRL_PME_STATUS;
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
/**
 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
 * @hw: The hardware instance.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable Wake-on-LAN.  Enabling also
 * programs the D3hot power state bits together with PME enable.
 */
static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
{
struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
struct pci_dev *pdev = hw_priv->pdev;
u16 data;
if (!pdev->pm_cap)
return;
pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
data &= ~PCI_PM_CTRL_STATE_MASK;
if (set)
data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
else
data &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
/**
 * hw_cfg_wol - configure Wake-on-LAN features
 * @hw: The hardware instance.
 * @frame: The pattern frame bit.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable certain Wake-on-LAN features.
 */
static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
{
u16 data;
data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
if (set)
data |= frame;
else
data &= ~frame;
writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
}
/**
 * hw_set_wol_frame - program Wake-on-LAN pattern
 * @hw: The hardware instance.
 * @i: The frame index.
 * @mask_size: The size of the mask.
 * @mask: Mask to ignore certain bytes in the pattern.
 * @frame_size: The size of the frame.
 * @pattern: The frame data.
 *
 * This routine is used to program Wake-on-LAN pattern.  The mask bytes are
 * written to the frame byte registers, the masked-in pattern bytes are
 * collected and their CRC is written to the frame CRC register.
 */
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
const u8 *mask, uint frame_size, const u8 *pattern)
{
int bits;
int from;
int len;
int to;
u32 crc;
u8 data[64];
u8 val = 0;
/* Each mask byte covers 8 pattern bytes; cap at hardware limits. */
if (frame_size > mask_size * 8)
frame_size = mask_size * 8;
if (frame_size > 64)
frame_size = 64;
/* Each frame slot occupies 0x10 bytes of register space. */
i *= 0x10;
writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
bits = len = from = to = 0;
do {
if (bits) {
/* Consume mask bits; keep only the masked-in pattern bytes. */
if ((val & 1))
data[to++] = pattern[from];
val >>= 1;
++from;
--bits;
} else {
/* Load the next mask byte and mirror it to the hardware. */
val = mask[len];
writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
+ len);
++len;
if (val)
bits = 8;
else
from += 8;
}
} while (from < (int) frame_size);
if (val) {
/* Trim mask bits that extend beyond the frame size. */
bits = mask[len - 1];
val <<= (from % 8);
bits &= ~val;
writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
1);
}
/* CRC over the masked-in bytes is what the hardware matches on. */
crc = ether_crc(to, data);
writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
}
/**
 * hw_add_wol_arp - add ARP pattern
 * @hw: The hardware instance.
 * @ip_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to add ARP pattern for waking up the host.
 * The pattern matches a broadcast ARP request for @ip_addr.
 */
static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
{
static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
u8 pattern[42] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x08, 0x06,
0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00 };
/* Target IP address field of the ARP request. */
memcpy(&pattern[38], ip_addr, 4);
hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
}
/**
 * hw_add_wol_bcast - add broadcast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add broadcast pattern for waking up the host.
 * Only the 6-byte broadcast destination MAC address is matched.
 */
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
static const u8 mask[] = { 0x3F };
static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}
/**
 * hw_add_wol_mcast - add multicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add multicast pattern for waking up the host.
 *
 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
 * by IPv6 ping command. Note that multicast packets are filtered through the
 * multicast hash table, so not all multicast packets can wake up the host.
 */
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
static const u8 mask[] = { 0x3F };
u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
/* Solicited-node address embeds the low 3 bytes of the MAC address. */
memcpy(&pattern[3], &hw->override_addr[3], 3);
hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
}
/**
 * hw_add_wol_ucast - add unicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add unicast pattern to wakeup the host.
 *
 * It is assumed the unicast packet is directed to the device, as the hardware
 * can only receive them in normal case.  Frame slot 0 matches the device's
 * current (possibly overridden) MAC address as destination.
 */
static void hw_add_wol_ucast(struct ksz_hw *hw)
{
	/* 0x3F: match all six destination address bytes. */
	static const u8 mask[] = { 0x3F };

	hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}
/**
 * hw_enable_wol - enable Wake-on-LAN
 * @hw: The hardware instance.
 * @wol_enable: The Wake-on-LAN settings (WAKE_* bit flags).
 * @net_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to enable Wake-on-LAN depending on driver settings.
 * Each pattern slot is enabled/disabled per the WAKE_* flags and then
 * (re)programmed.
 *
 * NOTE(review): WAKE_BCAST enables frame slot 2 but no broadcast pattern is
 * installed here (hw_add_wol_bcast() is not called) -- the slot presumably
 * retains whatever was last programmed; verify against intended behavior.
 */
static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
{
	hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
	hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
	hw_add_wol_ucast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
	hw_add_wol_mcast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
	hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
	hw_add_wol_arp(hw, net_addr);
}
/**
 * hw_init - check driver is correct for the hardware
 * @hw: The hardware instance.
 *
 * This function checks the hardware is correct for this driver and sets the
 * hardware up for proper initialization.
 *
 * Return number of ports or 0 if not right.
 */
static int hw_init(struct ksz_hw *hw)
{
	int ports;
	u16 chip_id;
	u16 revision;

	/* Set bus speed to 125MHz. */
	writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);

	/* Check KSZ884x chip ID. */
	chip_id = readw(hw->io + KS884X_CHIP_ID_OFFSET);
	revision = (chip_id & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
	chip_id &= KS884X_CHIP_ID_MASK_41;
	switch (chip_id) {
	case REG_CHIP_ID_41:
		ports = 1;
		break;
	case REG_CHIP_ID_42:
		ports = 2;
		break;
	default:
		/* Unknown chip; driver does not support it. */
		return 0;
	}

	/* Setup hardware features or bug workarounds. */
	if (revision <= 1) {
		hw->features |= SMALL_PACKET_TX_BUG;
		if (ports == 1)
			hw->features |= HALF_DUPLEX_SIGNAL_BUG;
	}
	return ports;
}
/**
 * hw_reset - reset the hardware
 * @hw: The hardware instance.
 *
 * This routine resets the hardware by asserting the global software reset
 * bit, waiting for the reset to complete, and then de-asserting it.
 */
static void hw_reset(struct ksz_hw *hw)
{
	writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);

	/* Wait for device to reset. */
	mdelay(10);

	/* Write 0 to clear device reset. */
	writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
}
/**
 * hw_setup - setup the hardware
 * @hw: The hardware instance.
 *
 * This routine sets up the cached transmit and receive DMA configurations
 * (they are written to the hardware later by hw_start_rx()/hw_start_tx())
 * and optionally changes the default LED mode.
 */
static void hw_setup(struct ksz_hw *hw)
{
#if SET_DEFAULT_LED
	u16 data;

	/* Change default LED mode. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
	data &= ~LED_MODE;
	data |= SET_DEFAULT_LED;
	writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
#endif

	/* Setup transmit control. */
	hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);

	/* Setup receive control. */
	hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
	hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;

	/* Hardware cannot handle UDP packet in IP fragments. */
	/* Only TCP and IP header checksum offloads are enabled here. */
	hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	if (hw->all_multi)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	if (hw->promiscuous)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
}
/**
 * hw_setup_intr - setup interrupt mask
 * @hw: The hardware instance.
 *
 * This routine sets up the cached interrupt mask for proper operation;
 * the mask is applied to the hardware elsewhere.
 */
static void hw_setup_intr(struct ksz_hw *hw)
{
	hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
}
/*
 * ksz_check_desc_num - sanitize the configured descriptor count
 * @info: Descriptor information structure.
 *
 * The ring index mask (info->mask) only works when the descriptor count is
 * a power of two of at least (1 << MIN_DESC_SHIFT).  If the configured
 * count is not, round it up to the next acceptable power of two.
 */
static void ksz_check_desc_num(struct ksz_desc_info *info)
{
#define MIN_DESC_SHIFT  2
	int num = info->alloc;
	int bit;

	/* Strip trailing zero bits; a power of two reduces to exactly 1. */
	for (bit = 0; !(num & 1); bit++)
		num >>= 1;

	if (num != 1 || bit < MIN_DESC_SHIFT) {
		pr_alert("Hardware descriptor numbers not right!\n");
		/* Count the remaining significant bits to round up. */
		while (num) {
			num >>= 1;
			bit++;
		}
		if (bit < MIN_DESC_SHIFT)
			bit = MIN_DESC_SHIFT;
		info->alloc = 1 << bit;
	}
	info->mask = info->alloc - 1;
}
/*
 * hw_init_desc - initialize a descriptor ring
 * @desc_info: Descriptor information structure.
 * @transmit: Indication that descriptors are for transmit.
 *
 * Links each software descriptor to its hardware descriptor and chains the
 * hardware descriptors into a circular ring via their physical addresses.
 *
 * NOTE(review): @transmit is currently unused and the rx end_of_ring bit is
 * set unconditionally on the last descriptor -- presumably harmless for the
 * tx ring via the buf union; confirm against the hardware descriptor layout.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	/* Point each descriptor's next link at the following descriptor. */
	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: last descriptor links back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;
	desc_info->cur = desc_info->ring;
}
/**
 * hw_set_desc_base - set descriptor base addresses
 * @hw: The hardware instance.
 * @tx_addr: The transmit descriptor base (physical address).
 * @rx_addr: The receive descriptor base (physical address).
 *
 * This routine programs the descriptor base addresses after reset.
 */
static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
{
	/* Set base address of Tx/Rx descriptors. */
	writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
	writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
}
/* Reset a descriptor ring's software state back to "all free, start at 0". */
static void hw_reset_pkts(struct ksz_desc_info *info)
{
	info->cur = info->ring;
	info->avail = info->alloc;
	info->last = info->next = 0;
}
/* Kick the receive DMA engine to resume processing descriptors. */
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}
/**
 * hw_start_rx - start receiving
 * @hw: The hardware instance.
 *
 * This routine starts the receive function of the hardware: it programs the
 * cached receive configuration, enables the RX-stopped interrupt, and starts
 * the receive DMA engine.
 */
static void hw_start_rx(struct ksz_hw *hw)
{
	writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	/* Notify when the receive stops. */
	hw->intr_mask |= KS884X_INT_RX_STOPPED;

	writel(DMA_START, hw->io + KS_DMA_RX_START);
	hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
	/* Non-zero rx_stop means an RX-stopped interrupt is expected. */
	hw->rx_stop++;

	/* Variable overflows. */
	if (0 == hw->rx_stop)
		hw->rx_stop = 2;
}
/**
 * hw_stop_rx - stop receiving
 * @hw: The hardware instance.
 *
 * This routine stops the receive function of the hardware: it clears the
 * stop-expectation counter, masks the RX-stopped interrupt, and disables
 * receive DMA.
 */
static void hw_stop_rx(struct ksz_hw *hw)
{
	hw->rx_stop = 0;
	hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
	writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
}
/**
 * hw_start_tx - start transmitting
 * @hw: The hardware instance.
 *
 * This routine starts the transmit function of the hardware by programming
 * the cached transmit configuration (which includes DMA_TX_ENABLE).
 */
static void hw_start_tx(struct ksz_hw *hw)
{
	writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_stop_tx - stop transmitting
 * @hw: The hardware instance.
 *
 * This routine stops the transmit function of the hardware by writing the
 * cached configuration with the DMA enable bit cleared.
 */
static void hw_stop_tx(struct ksz_hw *hw)
{
	writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_disable - disable hardware
 * @hw: The hardware instance.
 *
 * This routine disables the hardware by stopping both receive and transmit
 * and clearing the enabled flag.
 */
static void hw_disable(struct ksz_hw *hw)
{
	hw_stop_rx(hw);
	hw_stop_tx(hw);
	hw->enabled = 0;
}
/**
 * hw_enable - enable hardware
 * @hw: The hardware instance.
 *
 * This routine enables the hardware by starting transmit and receive and
 * setting the enabled flag.
 */
static void hw_enable(struct ksz_hw *hw)
{
	hw_start_tx(hw);
	hw_start_rx(hw);
	hw->enabled = 1;
}
/**
 * hw_alloc_pkt - allocate enough descriptors for transmission
 * @hw: The hardware instance.
 * @length: The length of the packet.
 * @physical: Number of descriptors required.
 *
 * This function allocates descriptors for transmission.
 *
 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
 */
static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
{
	/* Always leave one descriptor free. */
	if (hw->tx_desc_info.avail <= 1)
		return 0;

	/* Allocate a descriptor for transmission and mark it current. */
	get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
	hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;

	/* Keep track of number of transmit descriptors used so far. */
	++hw->tx_int_cnt;
	hw->tx_size += length;

	/* Cannot hold on too much data.  Forcing the counter past the mask
	 * makes hw_send_pkt() request a transmit-done interrupt.
	 */
	if (hw->tx_size >= MAX_TX_HELD_SIZE)
		hw->tx_int_cnt = hw->tx_int_mask + 1;

	/* Not enough descriptors for all fragments: caller must linearize. */
	if (physical > hw->tx_desc_info.avail)
		return 1;

	return hw->tx_desc_info.avail;
}
/**
 * hw_send_pkt - mark packet for transmission
 * @hw: The hardware instance.
 *
 * This routine marks the packet for transmission in PCI version: it closes
 * the descriptor chain, optionally requests a transmit interrupt, hands the
 * descriptor to hardware, and kicks the transmit DMA engine.
 */
static void hw_send_pkt(struct ksz_hw *hw)
{
	struct ksz_desc *cur = hw->tx_desc_info.cur;

	cur->sw.buf.tx.last_seg = 1;

	/* Interrupt only after specified number of descriptors used. */
	if (hw->tx_int_cnt > hw->tx_int_mask) {
		cur->sw.buf.tx.intr = 1;
		hw->tx_int_cnt = 0;
		hw->tx_size = 0;
	}

	/* KSZ8842 supports port directed transmission. */
	cur->sw.buf.tx.dest_port = hw->dst_ports;

	release_desc(cur);

	writel(0, hw->io + KS_DMA_TX_START);
}
/* Return non-zero when the 6-byte MAC address is all zeros. */
static int empty_addr(u8 *addr)
{
	static const u8 zero_addr[ETH_ALEN];

	return !memcmp(addr, zero_addr, ETH_ALEN);
}
/**
 * hw_set_addr - set MAC address
 * @hw: The hardware instance.
 *
 * This routine programs the MAC address of the hardware when the address is
 * overridden.
 */
static void hw_set_addr(struct ksz_hw *hw)
{
	int i;

	/* MAC_ADDR_ORDER maps between driver byte order and register order. */
	for (i = 0; i < ETH_ALEN; i++)
		writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
			hw->io + KS884X_ADDR_0_OFFSET + i);

	/* Keep the switch engine's idea of the address in sync. */
	sw_set_addr(hw, hw->override_addr);
}
/**
 * hw_read_addr - read MAC address
 * @hw: The hardware instance.
 *
 * This routine retrieves the MAC address of the hardware.  If no address
 * was overridden by the user and the hardware reports an all-zero address,
 * a driver default (adjusted by device id) is programmed instead.
 */
static void hw_read_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
			KS884X_ADDR_0_OFFSET + i);

	if (!hw->mac_override) {
		memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
		if (empty_addr(hw->override_addr)) {
			/* No address in hardware; fall back to a default,
			 * made unique per device by adding the device id.
			 */
			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
			memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
				ETH_ALEN);
			hw->override_addr[5] += hw->id;
			hw_set_addr(hw);
		}
	}
}
/*
 * hw_ena_add_addr - program and enable an additional MAC address entry
 * @hw: The hardware instance.
 * @index: The address table entry to program.
 * @mac_addr: The 6-byte MAC address.
 *
 * The first two MAC bytes plus the enable bit go into the high register;
 * the remaining four bytes go into the low register.
 */
static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
{
	u32 lo;
	u32 hi;

	hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
	hi |= ADD_ADDR_ENABLE;

	lo = ((u32) mac_addr[2] << 24) | ((u32) mac_addr[3] << 16) |
		((u32) mac_addr[4] << 8) | mac_addr[5];

	index *= ADD_ADDR_INCR;

	/* Write the low word first; the high word carries the enable bit. */
	writel(lo, hw->io + index + KS_ADD_ADDR_0_LO);
	writel(hi, hw->io + index + KS_ADD_ADDR_0_HI);
}
/* Reprogram every additional-address hardware entry from the driver table,
 * disabling entries whose table slot is empty.
 */
static void hw_set_add_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
		if (empty_addr(hw->address[i]))
			/* Clearing the high word clears the enable bit. */
			writel(0, hw->io + ADD_ADDR_INCR * i +
				KS_ADD_ADDR_0_HI);
		else
			hw_ena_add_addr(hw, i, hw->address[i]);
	}
}
/*
 * hw_add_addr - add a MAC address to the additional-address table
 * @hw: The hardware instance.
 * @mac_addr: The 6-byte MAC address to add.
 *
 * Returns 0 when the address is already covered or was added; -1 when the
 * table is full.
 */
static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int free_slot = ADDITIONAL_ENTRIES;
	int i;

	/* The primary address is matched by hardware already. */
	if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
		return 0;

	for (i = 0; i < hw->addr_list_size; i++) {
		/* Nothing to do if the address is already in the table. */
		if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
			return 0;
		/* Remember the first empty slot found. */
		if (ADDITIONAL_ENTRIES == free_slot &&
				empty_addr(hw->address[i]))
			free_slot = i;
	}

	if (free_slot < ADDITIONAL_ENTRIES) {
		memcpy(hw->address[free_slot], mac_addr, ETH_ALEN);
		hw_ena_add_addr(hw, free_slot, hw->address[free_slot]);
		return 0;
	}
	return -1;
}
/*
 * hw_del_addr - remove a MAC address from the additional-address table
 * @hw: The hardware instance.
 * @mac_addr: The 6-byte MAC address to remove.
 *
 * Returns 0 when found and removed; -1 when the address was not present.
 */
static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < hw->addr_list_size; i++) {
		if (memcmp(hw->address[i], mac_addr, ETH_ALEN))
			continue;
		/* Clear the table slot and disable the hardware entry. */
		memset(hw->address[i], 0, ETH_ALEN);
		writel(0, hw->io + ADD_ADDR_INCR * i + KS_ADD_ADDR_0_HI);
		return 0;
	}
	return -1;
}
/**
 * hw_clr_multicast - clear multicast addresses
 * @hw: The hardware instance.
 *
 * This routine removes all multicast addresses set in the hardware by
 * zeroing both the cached hash bits and the hardware hash table registers.
 */
static void hw_clr_multicast(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < HW_MULTICAST_SIZE; i++) {
		hw->multi_bits[i] = 0;
		writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
	}
}
/**
 * hw_set_grp_addr - set multicast addresses
 * @hw: The hardware instance.
 *
 * This routine programs multicast addresses for the hardware to accept those
 * addresses.  Each address hashes (top 6 bits of the Ethernet CRC) to one
 * bit in the 64-bit multicast hash table.
 */
static void hw_set_grp_addr(struct ksz_hw *hw)
{
	int i;
	int index;
	int position;
	int value;

	memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);

	for (i = 0; i < hw->multi_list_size; i++) {
		/* Hash = upper 6 bits of CRC32 over the 6-byte address. */
		position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
		index = position >> 3;		/* byte within table */
		value = 1 << (position & 7);	/* bit within byte */
		hw->multi_bits[index] |= (u8) value;
	}

	for (i = 0; i < HW_MULTICAST_SIZE; i++)
		writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
			i);
}
/**
 * hw_set_multicast - enable or disable all multicast receiving
 * @hw: The hardware instance.
 * @multicast: To turn on or off the all multicast feature.
 *
 * This routine enables/disables the hardware to accept all multicast packets.
 * Receive is stopped while the configuration changes and restarted only if
 * the hardware was enabled.
 */
static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
{
	/* Stop receiving for reconfiguration. */
	hw_stop_rx(hw);

	if (multicast)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	else
		hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;

	if (hw->enabled)
		hw_start_rx(hw);
}
/**
 * hw_set_promiscuous - enable or disable promiscuous receiving
 * @hw: The hardware instance.
 * @prom: To turn on or off the promiscuous feature.
 *
 * This routine enables/disables the hardware to accept all packets.
 * Receive is stopped while the configuration changes and restarted only if
 * the hardware was enabled.
 */
static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
{
	/* Stop receiving for reconfiguration. */
	hw_stop_rx(hw);

	if (prom)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
	else
		hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;

	if (hw->enabled)
		hw_start_rx(hw);
}
/**
 * sw_enable - enable the switch
 * @hw: The hardware instance.
 * @enable: The flag to enable or disable the switch
 *
 * This routine is used to enable/disable the switch in KSZ8842.  With
 * multiple device interfaces each port is isolated (host-only VLAN,
 * STP disabled); with a single interface all ports forward normally.
 */
static void sw_enable(struct ksz_hw *hw, int enable)
{
	int port;

	for (port = 0; port < SWITCH_PORT_NUM; port++) {
		if (hw->dev_count > 1) {
			/* Set port-base vlan membership with host port. */
			sw_cfg_port_base_vlan(hw, port,
				HOST_MASK | (1 << port));
			port_set_stp_state(hw, port, STP_STATE_DISABLED);
		} else {
			sw_cfg_port_base_vlan(hw, port, PORT_MASK);
			port_set_stp_state(hw, port, STP_STATE_FORWARDING);
		}
	}
	/* The host port itself follows the same policy. */
	if (hw->dev_count > 1)
		port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
	else
		port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);

	/* Non-zero enable becomes the hardware start bit. */
	if (enable)
		enable = KS8842_START;
	writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
}
/**
 * sw_setup - setup the switch
 * @hw: The hardware instance.
 *
 * This routine sets up the hardware switch engine for default operation:
 * global control, broadcast storm protection, priority, mirroring, rate,
 * VLAN, optional STP, flow control detection, and finally enables the
 * switch.
 */
static void sw_setup(struct ksz_hw *hw)
{
	int port;

	sw_set_global_ctrl(hw);

	/* Enable switch broadcast storm protection at 10% rate. */
	sw_init_broad_storm(hw);
	hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
	for (port = 0; port < SWITCH_PORT_NUM; port++)
		sw_ena_broad_storm(hw, port);

	sw_init_prio(hw);
	sw_init_mirror(hw);
	sw_init_prio_rate(hw);
	sw_init_vlan(hw);

	if (hw->features & STP_SUPPORT)
		sw_init_stp(hw);

	/* Record whether pause-frame flow control is active. */
	if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
			SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
		hw->overrides |= PAUSE_FLOW_CTRL;

	sw_enable(hw, 1);
}
/**
 * ksz_start_timer - start kernel timer
 * @info: Kernel timer information.
 * @time: The time tick.
 *
 * This routine starts the kernel timer after the specified time tick and
 * marks it as repeating forever (max = -1; see ksz_update_timer()).
 */
static void ksz_start_timer(struct ksz_timer_info *info, int time)
{
	info->cnt = 0;
	info->timer.expires = jiffies + time;
	add_timer(&info->timer);

	/* infinity */
	info->max = -1;
}
/**
 * ksz_stop_timer - stop kernel timer
 * @info: Kernel timer information.
 *
 * This routine stops the kernel timer.  Setting max to 0 also prevents
 * ksz_update_timer() from re-arming it.
 */
static void ksz_stop_timer(struct ksz_timer_info *info)
{
	if (info->max) {
		info->max = 0;
		del_timer_sync(&info->timer);
	}
}
/* Initialize a timer wrapper: period in jiffies, callback and its argument.
 * The timer is not armed here; use ksz_start_timer().
 */
static void ksz_init_timer(struct ksz_timer_info *info, int period,
	void (*function)(unsigned long), void *data)
{
	info->max = 0;
	info->period = period;
	init_timer(&info->timer);
	info->timer.function = function;
	info->timer.data = (unsigned long) data;
}
/* Called from the timer callback: re-arm the timer while more iterations
 * remain (max > 0 counts down via cnt; max < 0 repeats forever).
 */
static void ksz_update_timer(struct ksz_timer_info *info)
{
	++info->cnt;
	if (info->max > 0) {
		if (info->cnt < info->max) {
			info->timer.expires = jiffies + info->period;
			add_timer(&info->timer);
		} else
			/* Finished: mark the timer idle. */
			info->max = 0;
	} else if (info->max < 0) {
		info->timer.expires = jiffies + info->period;
		add_timer(&info->timer);
	}
}
/**
 * ksz_alloc_soft_desc - allocate software descriptors
 * @desc_info: Descriptor information structure.
 * @transmit: Indication that descriptors are for transmit.
 *
 * This local function allocates software descriptors for manipulation in
 * memory and initializes the descriptor ring.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
	/* kcalloc checks the count * size multiplication for overflow,
	 * unlike the open-coded kzalloc(n * size) form.
	 */
	desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
				  GFP_KERNEL);
	if (!desc_info->ring)
		return 1;
	hw_init_desc(desc_info, transmit);
	return 0;
}
/**
 * ksz_alloc_desc - allocate hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local function allocates hardware descriptors for receiving and
 * transmitting from one coherent DMA pool, cache-line aligned, with the
 * receive ring first and the transmit ring immediately after.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;
	int offset;

	/* Allocate memory for RX & TX descriptors. */
	/* Extra DESC_ALIGNMENT bytes leave room for the alignment fixup. */
	adapter->desc_pool.alloc_size =
		hw->rx_desc_info.size * hw->rx_desc_info.alloc +
		hw->tx_desc_info.size * hw->tx_desc_info.alloc +
		DESC_ALIGNMENT;

	adapter->desc_pool.alloc_virt =
		pci_alloc_consistent(
			adapter->pdev, adapter->desc_pool.alloc_size,
			&adapter->desc_pool.dma_addr);
	if (adapter->desc_pool.alloc_virt == NULL) {
		adapter->desc_pool.alloc_size = 0;
		return 1;
	}
	memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);

	/* Align to the next cache line boundary. */
	offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
		(DESC_ALIGNMENT -
		((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
	adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
	adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;

	/* Allocate receive/transmit descriptors. */
	hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
		adapter->desc_pool.virt;
	hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
	offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
	hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
		(adapter->desc_pool.virt + offset);
	hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;

	if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
		return 1;
	if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
		return 1;

	return 0;
}
/**
 * free_dma_buf - release DMA buffer resources
 * @adapter: Adapter information structure.
 * @dma_buf: The DMA buffer to release.
 * @direction: The PCI DMA direction the buffer was mapped with.
 *
 * This routine is just a helper function to release the DMA buffer resources:
 * it unmaps the DMA mapping and frees the socket buffer.
 */
static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
	int direction)
{
	pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
	dev_kfree_skb(dma_buf->skb);
	dma_buf->skb = NULL;
	dma_buf->dma = 0;
}
/**
 * ksz_init_rx_buffers - initialize receive descriptors
 * @adapter: Adapter information structure.
 *
 * This routine initializes DMA buffers for receiving: each receive
 * descriptor gets an MTU-sized socket buffer mapped for DMA.  Buffers whose
 * size no longer matches the current MTU are released and reallocated.
 */
static void ksz_init_rx_buffers(struct dev_info *adapter)
{
	int i;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct ksz_hw *hw = &adapter->hw;
	struct ksz_desc_info *info = &hw->rx_desc_info;

	for (i = 0; i < hw->rx_desc_info.alloc; i++) {
		get_rx_pkt(info, &desc);

		dma_buf = DMA_BUFFER(desc);
		/* MTU changed: discard the old buffer so a right-sized one
		 * is allocated below.
		 */
		if (dma_buf->skb && dma_buf->len != adapter->mtu)
			free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
		dma_buf->len = adapter->mtu;
		if (!dma_buf->skb)
			dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
		if (dma_buf->skb && !dma_buf->dma) {
			dma_buf->skb->dev = adapter->dev;
			dma_buf->dma = pci_map_single(
				adapter->pdev,
				skb_tail_pointer(dma_buf->skb),
				dma_buf->len,
				PCI_DMA_FROMDEVICE);
		}

		/* Set descriptor. */
		set_rx_buf(desc, dma_buf->dma);
		set_rx_len(desc, dma_buf->len);
		release_desc(desc);
	}
}
/**
 * ksz_alloc_mem - allocate memory for hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This function allocates memory for use by hardware descriptors for
 * receiving and transmitting.  It also derives the transmit interrupt mask
 * (interrupt every 2^k-1 descriptors, capped) and the aligned descriptor
 * size.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_mem(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Determine the number of receive and transmit descriptors. */
	hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
	hw->tx_desc_info.alloc = NUM_OF_TX_DESC;

	/* Determine how many descriptors to skip transmit interrupt. */
	hw->tx_int_cnt = 0;
	hw->tx_int_mask = NUM_OF_TX_DESC / 4;
	if (hw->tx_int_mask > 8)
		hw->tx_int_mask = 8;
	/* Count the bits to find the highest power of two <= the value ... */
	while (hw->tx_int_mask) {
		hw->tx_int_cnt++;
		hw->tx_int_mask >>= 1;
	}
	/* ... then turn it into a low-bit mask (2^(bits-1) - 1). */
	if (hw->tx_int_cnt) {
		hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
		hw->tx_int_cnt = 0;
	}

	/* Determine the descriptor size, rounded up to DESC_ALIGNMENT. */
	hw->rx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	hw->tx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
		pr_alert("Hardware descriptor size not right!\n");
	ksz_check_desc_num(&hw->rx_desc_info);
	ksz_check_desc_num(&hw->tx_desc_info);

	/* Allocate descriptors. */
	if (ksz_alloc_desc(adapter))
		return 1;

	return 0;
}
/**
 * ksz_free_desc - free software and hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local routine frees the software and hardware descriptors allocated
 * by ksz_alloc_desc().
 */
static void ksz_free_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Reset descriptor. */
	hw->rx_desc_info.ring_virt = NULL;
	hw->tx_desc_info.ring_virt = NULL;
	hw->rx_desc_info.ring_phys = 0;
	hw->tx_desc_info.ring_phys = 0;

	/* Free memory. */
	if (adapter->desc_pool.alloc_virt)
		pci_free_consistent(
			adapter->pdev,
			adapter->desc_pool.alloc_size,
			adapter->desc_pool.alloc_virt,
			adapter->desc_pool.dma_addr);

	/* Reset resource pool. */
	adapter->desc_pool.alloc_size = 0;
	adapter->desc_pool.alloc_virt = NULL;

	kfree(hw->rx_desc_info.ring);
	hw->rx_desc_info.ring = NULL;
	kfree(hw->tx_desc_info.ring);
	hw->tx_desc_info.ring = NULL;
}
/**
 * ksz_free_buffers - free buffers used in the descriptors
 * @adapter: Adapter information structure.
 * @desc_info: Descriptor information structure.
 * @direction: The PCI DMA direction the buffers were mapped with.
 *
 * This local routine frees buffers used in the DMA buffers.
 */
static void ksz_free_buffers(struct dev_info *adapter,
	struct ksz_desc_info *desc_info, int direction)
{
	int i;
	struct ksz_dma_buf *dma_buf;
	struct ksz_desc *desc = desc_info->ring;

	for (i = 0; i < desc_info->alloc; i++) {
		dma_buf = DMA_BUFFER(desc);
		if (dma_buf->skb)
			free_dma_buf(adapter, dma_buf, direction);
		desc++;
	}
}
/**
 * ksz_free_mem - free all resources used by descriptors
 * @adapter: Adapter information structure.
 *
 * This local routine frees all the resources allocated by ksz_alloc_mem().
 */
static void ksz_free_mem(struct dev_info *adapter)
{
	/* Free transmit buffers. */
	ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
		PCI_DMA_TODEVICE);

	/* Free receive buffers. */
	ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
		PCI_DMA_FROMDEVICE);

	/* Free descriptors. */
	ksz_free_desc(adapter);
}
/*
 * get_mib_counters - sum MIB counters over a range of ports
 * @hw: The hardware instance.
 * @first: First port to include.
 * @cnt: Number of ports to include.
 * @counter: Output array of TOTAL_PORT_COUNTER_NUM accumulated counters.
 */
static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
	u64 *counter)
{
	int port;
	int mib;
	struct ksz_port_mib *port_mib;

	memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	for (port = first; port < first + cnt; port++) {
		port_mib = &hw->port_mib[port];
		for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
			counter[mib] += port_mib->counter[mib];
	}
}
/**
 * send_packet - send packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This routine is used to send a packet out to the network.  It maps the
 * linear part and every page fragment of the skb to transmit descriptors,
 * sets checksum offload flags when requested, and hands the chain to the
 * hardware.  Caller must hold the hardware lock and have reserved enough
 * descriptors via hw_alloc_pkt().
 */
static void send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct ksz_desc *desc;
	struct ksz_desc *first;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_dma_buf *dma_buf;
	int len;
	int last_frag = skb_shinfo(skb)->nr_frags;

	/*
	 * KSZ8842 with multiple device interfaces needs to be told which port
	 * to send.
	 */
	if (hw->dev_count > 1)
		hw->dst_ports = 1 << priv->port.first_port;

	/* Hardware will pad the length to 60. */
	len = skb->len;

	/* Remember the very first descriptor. */
	first = info->cur;
	desc = first;

	dma_buf = DMA_BUFFER(desc);
	if (last_frag) {
		int frag;
		skb_frag_t *this_frag;

		/* First descriptor carries the linear (header) part. */
		dma_buf->len = skb_headlen(skb);

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);

		frag = 0;
		do {
			this_frag = &skb_shinfo(skb)->frags[frag];

			/* Get a new descriptor. */
			get_tx_pkt(info, &desc);

			/* Keep track of descriptors used so far. */
			++hw->tx_int_cnt;

			dma_buf = DMA_BUFFER(desc);
			dma_buf->len = skb_frag_size(this_frag);

			dma_buf->dma = pci_map_single(
				hw_priv->pdev,
				skb_frag_address(this_frag),
				dma_buf->len,
				PCI_DMA_TODEVICE);
			set_tx_buf(desc, dma_buf->dma);
			set_tx_len(desc, dma_buf->len);

			frag++;
			if (frag == last_frag)
				break;

			/* Do not release the last descriptor here. */
			release_desc(desc);
		} while (1);

		/* current points to the last descriptor. */
		info->cur = desc;

		/* Release the first descriptor. */
		release_desc(first);
	} else {
		/* Single-descriptor case: whole packet in one buffer. */
		dma_buf->len = len;

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		(desc)->sw.buf.tx.csum_gen_tcp = 1;
		(desc)->sw.buf.tx.csum_gen_udp = 1;
	}

	/*
	 * The last descriptor holds the packet so that it can be returned to
	 * network subsystem after all descriptors are transmitted.
	 */
	dma_buf->skb = skb;

	hw_send_pkt(hw);

	/* Update transmit statistics. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
}
/**
 * transmit_cleanup - clean up transmit descriptors
 * @hw_priv: Network device information.
 * @normal: Non-zero to stop at the first descriptor still owned by
 *	hardware; zero to forcibly reclaim every descriptor (reset path).
 *
 * This routine is called to clean up the transmitted buffers: unmap DMA
 * buffers and free the socket buffers of completed packets.
 */
static void transmit_cleanup(struct dev_info *hw_priv, int normal)
{
	int last;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct net_device *dev = NULL;

	spin_lock(&hw_priv->hwlock);
	last = info->last;

	while (info->avail < info->alloc) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[last];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.tx.hw_owned) {
			if (normal)
				break;
			else
				/* Forced cleanup: take it back from hardware. */
				reset_desc(desc, status);
		}

		dma_buf = DMA_BUFFER(desc);
		pci_unmap_single(
			hw_priv->pdev, dma_buf->dma, dma_buf->len,
			PCI_DMA_TODEVICE);

		/* This descriptor contains the last buffer in the packet. */
		if (dma_buf->skb) {
			dev = dma_buf->skb->dev;

			/* Release the packet back to network subsystem. */
			dev_kfree_skb_irq(dma_buf->skb);
			dma_buf->skb = NULL;
		}

		/* Free the transmitted descriptor. */
		last++;
		last &= info->mask;
		info->avail++;
	}
	info->last = last;
	spin_unlock(&hw_priv->hwlock);

	/* Notify the network subsystem that the packet has been sent. */
	if (dev)
		dev->trans_start = jiffies;
}
/**
 * tx_done - transmit done processing
 * @hw_priv: Network device information.
 *
 * This routine is called when the transmit interrupt is triggered, indicating
 * either a packet is sent successfully or there are transmit errors.  It
 * reclaims completed descriptors and wakes any stopped transmit queues.
 */
static void tx_done(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	transmit_cleanup(hw_priv, 1);

	for (port = 0; port < hw->dev_count; port++) {
		struct net_device *dev = hw->port_info[port].pdev;

		if (netif_running(dev) && netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}
}
/* Transfer device/protocol/checksum metadata from @old to its replacement
 * @skb and free @old.  Used when a packet has to be copied into a fresh
 * buffer before transmission.
 */
static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
{
	skb->dev = old->dev;
	skb->protocol = old->protocol;
	skb->ip_summed = old->ip_summed;
	skb->csum = old->csum;
	skb_set_network_header(skb, ETH_HLEN);

	dev_kfree_skb(old);
}
/**
 * netdev_tx - send out packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This function is used by the upper network layer to send out a packet.
 * It applies the small-packet hardware workaround, linearizes the packet
 * when not enough descriptors are available (or for IPv6 checksum offload),
 * and hands the packet to send_packet().
 *
 * Return NETDEV_TX_OK if successful; NETDEV_TX_BUSY to have the packet
 * requeued.
 */
static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int left;
	int num = 1;
	int rc = 0;

	/* Workaround: early revisions mishandle very small packets, so pad
	 * them to 50 bytes (copying to a new skb if there is no tailroom).
	 */
	if (hw->features & SMALL_PACKET_TX_BUG) {
		struct sk_buff *org_skb = skb;

		if (skb->len <= 48) {
			if (skb_end_pointer(skb) - skb->data >= 50) {
				memset(&skb->data[skb->len], 0, 50 - skb->len);
				skb->len = 50;
			} else {
				skb = netdev_alloc_skb(dev, 50);
				if (!skb)
					return NETDEV_TX_BUSY;
				memcpy(skb->data, org_skb->data, org_skb->len);
				memset(&skb->data[org_skb->len], 0,
					50 - org_skb->len);
				skb->len = 50;
				copy_old_skb(org_skb, skb);
			}
		}
	}

	spin_lock_irq(&hw_priv->hwlock);

	num = skb_shinfo(skb)->nr_frags + 1;
	left = hw_alloc_pkt(hw, skb->len, num);
	if (left) {
		/* Copy into one linear buffer when fragments exceed the free
		 * descriptors, or for IPv6 checksum offload which the
		 * hardware does not generate.
		 */
		if (left < num ||
		    ((CHECKSUM_PARTIAL == skb->ip_summed) &&
		     (ETH_P_IPV6 == htons(skb->protocol)))) {
			struct sk_buff *org_skb = skb;

			skb = netdev_alloc_skb(dev, org_skb->len);
			if (!skb) {
				rc = NETDEV_TX_BUSY;
				goto unlock;
			}
			skb_copy_and_csum_dev(org_skb, skb->data);
			org_skb->ip_summed = CHECKSUM_NONE;
			skb->len = org_skb->len;
			copy_old_skb(org_skb, skb);
		}
		send_packet(skb, dev);
		if (left <= num)
			netif_stop_queue(dev);
	} else {
		/* Stop the transmit queue until packet is allocated. */
		netif_stop_queue(dev);
		rc = NETDEV_TX_BUSY;
	}
unlock:
	spin_unlock_irq(&hw_priv->hwlock);

	return rc;
}
/**
 * netdev_tx_timeout - transmit timeout processing
 * @dev: Network device.
 *
 * This routine is called when the transmit timer expires. That indicates the
 * hardware is not running correctly because transmit interrupts are not
 * triggered to free up resources so that the transmit routine can continue
 * sending out packets. The hardware is reset to correct the problem.
 */
static void netdev_tx_timeout(struct net_device *dev)
{
	static unsigned long last_reset;

	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	if (hw->dev_count > 1) {
		/*
		 * Only reset the hardware if time between calls is long
		 * enough.
		 */
		if (jiffies - last_reset <= dev->watchdog_timeo)
			hw_priv = NULL;
	}

	last_reset = jiffies;
	if (hw_priv) {
		/* Full reset: stop everything, reclaim buffers, then
		 * re-apply the complete hardware configuration.
		 */
		hw_dis_intr(hw);
		hw_disable(hw);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		ksz_init_rx_buffers(hw_priv);

		hw_reset(hw);

		hw_set_desc_base(hw,
			hw->tx_desc_info.ring_phys,
			hw->rx_desc_info.ring_phys);
		hw_set_addr(hw);
		if (hw->all_multi)
			hw_set_multicast(hw, hw->all_multi);
		else if (hw->multi_list_size)
			hw_set_grp_addr(hw);

		if (hw->dev_count > 1) {
			hw_set_add_addr(hw);
			for (port = 0; port < SWITCH_PORT_NUM; port++) {
				struct net_device *port_dev;

				port_set_stp_state(hw, port,
					STP_STATE_DISABLED);

				port_dev = hw->port_info[port].pdev;
				if (netif_running(port_dev))
					port_set_stp_state(hw, port,
						STP_STATE_SIMPLE);
			}
		}

		hw_enable(hw);
		hw_ena_intr(hw);
	}

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
/* Mark a received TCP-over-IPv4 packet as checksum-verified by hardware.
 *
 * NOTE(review): for a VLAN-tagged frame, `protocol = iph->tot_len` reads the
 * encapsulated EtherType through the iphdr view of the VLAN header (tot_len
 * sits at the matching offset) -- presumably intentional, but confirm against
 * struct iphdr/vlan_hdr layouts before touching this.
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Skip the 4-byte VLAN tag and re-read the inner protocol. */
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		/* Hardware verified the TCP checksum (see hw->rx_cfg). */
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
/*
 * rx_proc - deliver one received packet to the network stack
 * @dev: Network device the packet arrived on.
 * @hw: The hardware instance.
 * @desc: The receive descriptor holding the packet.
 * @status: Decoded descriptor status.
 *
 * Copies the packet out of the DMA buffer into a freshly allocated skb
 * (the DMA buffer stays attached to the descriptor for reuse) and passes
 * it up via netif_rx().
 *
 * Returns 0 on success, -ENOMEM when no skb could be allocated.
 */
static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
	struct ksz_desc *desc, union desc_stat status)
{
	int packet_len;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_dma_buf *dma_buf;
	struct sk_buff *skb;
	int rx_status;

	/* Received length includes 4-byte CRC. */
	packet_len = status.rx.frame_len - 4;

	dma_buf = DMA_BUFFER(desc);
	pci_dma_sync_single_for_cpu(
		hw_priv->pdev, dma_buf->dma, packet_len + 4,
		PCI_DMA_FROMDEVICE);

	do {
		/* skb->data != skb->head */
		skb = netdev_alloc_skb(dev, packet_len + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return -ENOMEM;
		}

		/*
		 * Align socket buffer in 4-byte boundary for better
		 * performance.
		 */
		skb_reserve(skb, 2);

		memcpy(skb_put(skb, packet_len),
			dma_buf->skb->data, packet_len);
	} while (0);

	skb->protocol = eth_type_trans(skb, dev);

	if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
		csum_verified(skb);

	/* Update receive statistics. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += packet_len;

	/* Notify upper layer for received packet. */
	rx_status = netif_rx(skb);

	return 0;
}
/*
 * dev_rcv_packets - drain the receive descriptor ring (single device mode)
 * @hw_priv: Network device information.
 *
 * Walks the receive ring until a hardware-owned descriptor is found,
 * delivering each complete frame to the stack and recycling the
 * descriptors.
 *
 * Return number of frames passed up.
 */
static int dev_rcv_packets(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	struct net_device *dev = hw->port_info[0].pdev;
	union desc_stat status;
	struct ksz_desc *desc;
	int idx = info->next;
	int cnt;
	int received = 0;

	for (cnt = info->alloc; cnt > 0; cnt--) {
		/* Stop at the first descriptor still owned by hardware. */
		desc = &info->ring[idx];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.first_desc && status.rx.last_desc &&
		    !rx_proc(dev, hw, desc, status))
			received++;

		/* Recycle the descriptor and advance with ring wrap. */
		release_desc(desc);
		idx = (idx + 1) & info->mask;
	}
	info->next = idx;

	return received;
}
/*
 * port_rcv_packets - drain the receive ring (multiple device interfaces)
 * @hw_priv: Network device information.
 *
 * Like dev_rcv_packets() but resolves the destination net_device from the
 * source port recorded in each descriptor, dropping frames for ports that
 * are not up.
 *
 * Return number of frames passed up.
 */
static int port_rcv_packets(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	struct net_device *dev = hw->port_info[0].pdev;
	union desc_stat status;
	struct ksz_desc *desc;
	int idx = info->next;
	int cnt;
	int received = 0;

	for (cnt = info->alloc; cnt > 0; cnt--) {
		int usable = 1;

		/* Stop at the first descriptor still owned by hardware. */
		desc = &info->ring[idx];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Route to the device of the receiving port. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			usable = netif_running(dev);
		}

		/* Status valid only when last descriptor bit is set. */
		if (usable && status.rx.first_desc && status.rx.last_desc &&
		    !rx_proc(dev, hw, desc, status))
			received++;

		/* Recycle the descriptor and advance with ring wrap. */
		release_desc(desc);
		idx = (idx + 1) & info->mask;
	}
	info->next = idx;

	return received;
}
/*
 * dev_rcv_special - drain the receive ring when huge frames are enabled
 * @hw_priv: Network device information.
 *
 * Variant of the receive routine installed by hw_cfg_huge_frame() when
 * RX_HUGE_FRAME is set: receive errors are reported by hardware (DMA_RX_ERROR
 * in rx_cfg), so each frame's error bits must be inspected here.  Frames
 * whose only error is "too long" are still accepted.
 *
 * Return number of frames passed up.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error.  With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
/*
 * rx_proc_task - receive processing tasklet
 * @data: Pointer to the dev_info structure (cast from unsigned long).
 *
 * Runs the installed receive handler.  When the ring is fully drained
 * (handler returned 0), receive is resumed in case it was suspended by an
 * overrun and the receive interrupt is re-enabled; otherwise the interrupt
 * is acknowledged and the tasklet reschedules itself to keep draining.
 */
static void rx_proc_task(unsigned long data)
{
	struct dev_info *hw_priv = (struct dev_info *) data;
	struct ksz_hw *hw = &hw_priv->hw;

	if (!hw->enabled)
		return;
	if (unlikely(!hw_priv->dev_rcv(hw_priv))) {

		/* In case receive process is suspended because of overrun. */
		hw_resume_rx(hw);

		/* tasklets are interruptible. */
		spin_lock_irq(&hw_priv->hwlock);
		hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
		spin_unlock_irq(&hw_priv->hwlock);
	} else {
		hw_ack_intr(hw, KS884X_INT_RX);
		tasklet_schedule(&hw_priv->rx_tasklet);
	}
}
/*
 * tx_proc_task - transmit completion tasklet
 * @data: Pointer to the dev_info structure (cast from unsigned long).
 *
 * Acknowledges the transmit interrupts, reclaims completed transmit
 * descriptors via tx_done(), then re-enables the transmit interrupt under
 * the hardware lock.
 */
static void tx_proc_task(unsigned long data)
{
	struct dev_info *hw_priv = (struct dev_info *) data;
	struct ksz_hw *hw = &hw_priv->hw;

	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}
/*
 * handle_rx_stop - react to a "receive stopped" interrupt
 * @hw: Hardware structure.
 *
 * Small state machine keyed on hw->rx_stop:
 *   0  - stop was not requested: mask the interrupt off;
 *   1  - first stop after a start: just advance the counter;
 *   >1 - restart receive if it is still enabled, otherwise mask the
 *        interrupt and reset the counter.
 */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	/* Receive just has been stopped. */
	if (0 == hw->rx_stop)
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	else if (hw->rx_stop > 1) {
		if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
			hw_start_rx(hw);
		} else {
			hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
			hw->rx_stop = 0;
		}
	} else
		/* Receive just has been started. */
		hw->rx_stop++;
}
/**
 * netdev_intr - interrupt handling
 * @irq:    Interrupt number.
 * @dev_id: Network device.
 *
 * This function is called by upper network layer to signal interrupt.
 *
 * Heavy work (receive drain, transmit reclaim) is deferred to tasklets;
 * the corresponding interrupt bits are masked until the tasklets re-enable
 * them.
 *
 * Return IRQ_HANDLED if interrupt is handled, IRQ_NONE if it was not ours.
 */
static irqreturn_t netdev_intr(int irq, void *dev_id)
{
	uint int_enable = 0;
	struct net_device *dev = (struct net_device *) dev_id;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	hw_read_intr(hw, &int_enable);

	/* Not our interrupt! */
	if (!int_enable)
		return IRQ_NONE;

	do {
		hw_ack_intr(hw, int_enable);
		/* Only handle sources we have not masked off. */
		int_enable &= hw->intr_mask;

		if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
			/* Masked until tx_proc_task re-enables it. */
			hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
			tasklet_schedule(&hw_priv->tx_tasklet);
		}

		if (likely(int_enable & KS884X_INT_RX)) {
			/* Masked until rx_proc_task re-enables it. */
			hw_dis_intr_bit(hw, KS884X_INT_RX);
			tasklet_schedule(&hw_priv->rx_tasklet);
		}

		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
			dev->stats.rx_fifo_errors++;
			hw_resume_rx(hw);
		}

		if (unlikely(int_enable & KS884X_INT_PHY)) {
			struct ksz_port *port = &priv->port;

			hw->features |= LINK_INT_WORKING;
			port_get_link_speed(port);
		}

		if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
			handle_rx_stop(hw);
			break;
		}

		if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
			u32 data;

			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
			pr_info("Tx stopped\n");
			data = readl(hw->io + KS_DMA_TX_CTRL);
			if (!(data & DMA_TX_ENABLE))
				pr_info("Tx disabled\n");
			break;
		}
	} while (0);

	hw_ena_intr(hw);

	return IRQ_HANDLED;
}
/*
 * Linux network device functions
 */

/*
 * Next scheduled time for the per-port counter reads; used by netdev_open()
 * to stagger hw_priv->counter[i].time across ports.
 */
static unsigned long next_jiffies;
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netdev_netpoll - polling entry point for netconsole and similar users
 * @dev: Network device.
 *
 * Disables hardware interrupts and invokes the interrupt handler directly
 * so the device can be serviced with interrupts unavailable.
 */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif
/*
 * bridge_change - update port-based VLAN membership after a bridge change
 * @hw: Hardware structure.
 *
 * When no port is forwarding, the host port falls back to simple mode and
 * learned addresses are blocked.  Each switch port's membership is then
 * recomputed: forwarding ports share the bridge membership mask, others are
 * isolated to themselves plus the host port.
 */
static void bridge_change(struct ksz_hw *hw)
{
	struct ksz_switch *sw = hw->ksz_switch;
	u8 member;
	int p;

	/* No ports in forwarding state. */
	if (!sw->member) {
		port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
		sw_block_addr(hw);
	}
	for (p = 0; p < SWITCH_PORT_NUM; p++) {
		if (sw->port_cfg[p].stp_state == STP_STATE_FORWARDING)
			member = HOST_MASK | sw->member;
		else
			member = HOST_MASK | (1 << p);

		/* Only touch the hardware when membership really changed. */
		if (sw->port_cfg[p].member != member)
			sw_cfg_port_base_vlan(hw, p, member);
	}
}
/**
 * netdev_close - close network device
 * @dev: Network device.
 *
 * This function process the close operation of network device.  This is
 * caused by the user command "ifconfig ethX down."
 *
 * The hardware is only fully shut down once the last opened interface is
 * closed (hw_priv->opened reaches zero).
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_close(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &hw_priv->hw;
	int pi;

	netif_stop_queue(dev);

	ksz_stop_timer(&priv->monitor_timer_info);

	/* Need to shut the port manually in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);

		/* Port is closed.  Need to change bridge setting. */
		if (hw->features & STP_SUPPORT) {
			pi = 1 << port->first_port;
			if (hw->ksz_switch->member & pi) {
				hw->ksz_switch->member &= ~pi;
				bridge_change(hw);
			}
		}
	}
	if (port->first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	if (!hw_priv->wol_enable)
		port_set_power_saving(port, true);

	/* Undo this interface's contribution to the shared mode counters. */
	if (priv->multicast)
		--hw->all_multi;
	if (priv->promiscuous)
		--hw->promiscuous;

	hw_priv->opened--;
	if (!(hw_priv->opened)) {
		ksz_stop_timer(&hw_priv->mib_timer_info);
		flush_work(&hw_priv->mib_read);

		hw_dis_intr(hw);
		hw_disable(hw);
		hw_clr_multicast(hw);

		/* Delay for receive task to stop scheduling itself. */
		msleep(2000 / HZ);

		tasklet_disable(&hw_priv->rx_tasklet);
		tasklet_disable(&hw_priv->tx_tasklet);
		free_irq(dev->irq, hw_priv->dev);

		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);

		/* Clean out static MAC table when the switch is shutdown. */
		if (hw->features & STP_SUPPORT)
			sw_clr_sta_mac_table(hw);
	}

	return 0;
}
/*
 * hw_cfg_huge_frame - configure huge-frame reception
 * @hw_priv: Network device information.
 * @hw:      Hardware structure.
 *
 * Mirrors the RX_HUGE_FRAME feature bit into the switch control register
 * (when a switch is present) and installs the matching receive handler:
 * dev_rcv_special when huge frames (and therefore error reporting) are
 * enabled, otherwise the normal single- or multi-device handler.
 */
static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
{
	int huge = !!(hw->features & RX_HUGE_FRAME);

	if (hw->ksz_switch) {
		u32 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);

		if (huge)
			data |= SWITCH_HUGE_PACKET;
		else
			data &= ~SWITCH_HUGE_PACKET;
		writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
	}
	if (huge) {
		hw->rx_cfg |= DMA_RX_ERROR;
		hw_priv->dev_rcv = dev_rcv_special;
	} else {
		hw->rx_cfg &= ~DMA_RX_ERROR;
		hw_priv->dev_rcv = (hw->dev_count > 1) ?
			port_rcv_packets : dev_rcv_packets;
	}
}
/*
 * prepare_hardware - one-time hardware setup on first open
 * @dev: Network device requesting interrupts.
 *
 * Requests the (shared) IRQ, enables the tasklets, resets the hardware and
 * programs descriptor ring bases, MAC address, huge-frame configuration and
 * receive buffers.
 *
 * Return 0 if successful; otherwise the request_irq() error code.
 */
static int prepare_hardware(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int rc = 0;

	/* Remember the network device that requests interrupts. */
	hw_priv->dev = dev;
	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;
	tasklet_enable(&hw_priv->rx_tasklet);
	tasklet_enable(&hw_priv->tx_tasklet);

	/* Mode counters start fresh on first open. */
	hw->promiscuous = 0;
	hw->all_multi = 0;
	hw->multi_list_size = 0;

	hw_reset(hw);

	hw_set_desc_base(hw,
		hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
	hw_set_addr(hw);
	hw_cfg_huge_frame(hw_priv, hw);
	ksz_init_rx_buffers(hw_priv);
	return 0;
}
/*
 * set_media_state - update carrier state and log the link change
 * @dev:         Network device.
 * @media_state: Media state the link is compared against.
 *
 * The carrier is turned on when the device's current media state matches
 * @media_state, off otherwise, and the result is logged.
 */
static void set_media_state(struct net_device *dev, int media_state)
{
	struct dev_priv *priv = netdev_priv(dev);
	int up = (media_state == priv->media_state);

	if (up)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	netif_info(priv, link, dev, "link %s\n", up ? "on" : "off");
}
/**
 * netdev_open - open network device
 * @dev: Network device.
 *
 * This function process the open operation of network device.  This is
 * caused by the user command "ifconfig ethX up."
 *
 * Full hardware initialization only happens when the first interface is
 * opened (hw_priv->opened == 0); later opens of additional interfaces only
 * set up their own port.
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_open(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int i;
	int p;
	int rc = 0;

	priv->multicast = 0;
	priv->promiscuous = 0;

	/* Reset device statistics. */
	memset(&dev->stats, 0, sizeof(struct net_device_stats));
	memset((void *) port->counter, 0,
		(sizeof(u64) * OID_COUNTER_LAST));

	if (!(hw_priv->opened)) {
		rc = prepare_hardware(dev);
		if (rc)
			return rc;
		for (i = 0; i < hw->mib_port_cnt; i++) {
			/* Stagger the MIB read times across ports. */
			if (next_jiffies < jiffies)
				next_jiffies = jiffies + HZ * 2;
			else
				next_jiffies += HZ * 1;
			hw_priv->counter[i].time = next_jiffies;
			hw->port_mib[i].state = media_disconnected;
			port_init_cnt(hw, i);
		}
		if (hw->ksz_switch)
			hw->port_mib[HOST_PORT].state = media_connected;
		else {
			hw_add_wol_bcast(hw);
			hw_cfg_wol_pme(hw, 0);
			hw_clr_wol_pme_status(&hw_priv->hw);
		}
	}
	port_set_power_saving(port, false);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/*
		 * Initialize to invalid value so that link detection
		 * is done.
		 */
		hw->port_info[p].partner = 0xFF;
		hw->port_info[p].state = media_disconnected;
	}

	/* Need to open the port in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
		if (port->first_port > 0)
			hw_add_addr(hw, dev->dev_addr);
	}

	port_get_link_speed(port);
	if (port->force_link)
		port_force_link_speed(port);
	else
		port_set_link_speed(port);

	if (!(hw_priv->opened)) {
		hw_setup_intr(hw);
		hw_enable(hw);
		hw_ena_intr(hw);

		if (hw->mib_port_cnt)
			ksz_start_timer(&hw_priv->mib_timer_info,
				hw_priv->mib_timer_info.period);
	}

	hw_priv->opened++;

	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);

	priv->media_state = port->linked->state;

	set_media_state(dev, media_connected);
	netif_start_queue(dev);

	return 0;
}
/*
 * How the reported fields map to "ifconfig"/"ip -s" output:
 *
 * RX errors = rx_errors
 * RX dropped = rx_dropped
 * RX overruns = rx_fifo_errors
 * RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors
 * TX errors = tx_errors
 * TX dropped = tx_dropped
 * TX overruns = tx_fifo_errors
 * TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors
 * collisions = collisions
 */

/**
 * netdev_query_statistics - query network device statistics
 * @dev: Network device.
 *
 * This function returns the statistics of the network device.  The device
 * needs not be opened.
 *
 * Return network device statistics.
 */
static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &priv->adapter->hw;
	struct ksz_port_mib *mib;
	int i;
	int p;

	dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
	dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];

	/* Reset to zero to add count later. */
	dev->stats.multicast = 0;
	dev->stats.collisions = 0;
	dev->stats.rx_length_errors = 0;
	dev->stats.rx_crc_errors = 0;
	dev->stats.rx_frame_errors = 0;
	dev->stats.tx_window_errors = 0;

	/* Accumulate MIB counters over all ports owned by this device. */
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		mib = &hw->port_mib[p];

		dev->stats.multicast += (unsigned long)
			mib->counter[MIB_COUNTER_RX_MULTICAST];

		dev->stats.collisions += (unsigned long)
			mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];

		dev->stats.rx_length_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
			mib->counter[MIB_COUNTER_RX_FRAGMENT] +
			mib->counter[MIB_COUNTER_RX_OVERSIZE] +
			mib->counter[MIB_COUNTER_RX_JABBER]);
		dev->stats.rx_crc_errors += (unsigned long)
			mib->counter[MIB_COUNTER_RX_CRC_ERR];
		dev->stats.rx_frame_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
			mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);

		dev->stats.tx_window_errors += (unsigned long)
			mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
	}

	return &dev->stats;
}
/**
 * netdev_set_mac_address - set network device MAC address
 * @dev:  Network device.
 * @addr: Buffer of MAC address.
 *
 * This function is used to set the MAC address of the network device.
 *
 * The primary interface (first_port == 0) programs the main MAC address
 * (and remembers it as an override); secondary interfaces instead swap
 * their entry in the additional-address table.
 *
 * Return 0 to indicate success.
 */
static int netdev_set_mac_address(struct net_device *dev, void *addr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct sockaddr *mac = addr;
	uint interrupt;

	if (priv->port.first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	else {
		hw->mac_override = 1;
		memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
	}

	memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);

	/* Block interrupts while the hardware address is reprogrammed. */
	interrupt = hw_block_intr(hw);

	if (priv->port.first_port > 0)
		hw_add_addr(hw, dev->dev_addr);
	else
		hw_set_addr(hw);
	hw_restore_intr(hw, interrupt);

	return 0;
}
/*
 * dev_set_promiscuous - track per-device promiscuous mode changes
 * @dev:         Network device.
 * @priv:        Private device data.
 * @hw:          Hardware structure.
 * @promiscuous: New promiscuous setting for this device.
 *
 * Keeps a reference count across all interfaces sharing the hardware and
 * only toggles the hardware bit on 0 <-> 1 transitions.  Leaving
 * promiscuous mode while the port is a bridge member also removes the
 * port from the bridge.
 */
static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
	struct ksz_hw *hw, int promiscuous)
{
	if (promiscuous != priv->promiscuous) {
		u8 prev_state = hw->promiscuous;

		if (promiscuous)
			++hw->promiscuous;
		else
			--hw->promiscuous;
		priv->promiscuous = promiscuous;

		/* Turn on/off promiscuous mode. */
		if (hw->promiscuous <= 1 && prev_state <= 1)
			hw_set_promiscuous(hw, hw->promiscuous);

		/*
		 * Port is not in promiscuous mode, meaning it is released
		 * from the bridge.
		 */
		if ((hw->features & STP_SUPPORT) && !promiscuous &&
		    (dev->priv_flags & IFF_BRIDGE_PORT)) {
			struct ksz_switch *sw = hw->ksz_switch;
			int port = priv->port.first_port;

			port_set_stp_state(hw, port, STP_STATE_DISABLED);
			port = 1 << port;
			if (sw->member & port) {
				sw->member &= ~port;
				bridge_change(hw);
			}
		}
	}
}
/*
 * dev_set_multicast - track per-device all-multicast mode changes
 * @priv:      Private device data.
 * @hw:        Hardware structure.
 * @multicast: New all-multicast setting for this device.
 *
 * Reference-counts all-multicast mode across all interfaces sharing the
 * hardware and only toggles the hardware bit on 0 <-> 1 transitions.
 */
static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
	int multicast)
{
	u8 prev;

	if (multicast == priv->multicast)
		return;

	prev = hw->all_multi;
	if (multicast)
		hw->all_multi++;
	else
		hw->all_multi--;
	priv->multicast = multicast;

	/* Turn on/off all multicast mode only at the 0 <-> 1 boundary. */
	if (hw->all_multi <= 1 && prev <= 1)
		hw_set_multicast(hw, hw->all_multi);
}
/**
 * netdev_set_rx_mode
 * @dev: Network device.
 *
 * This routine is used to set multicast addresses or put the network device
 * into promiscuous mode.
 *
 * In multiple device interfaces mode only the all-multicast setting is
 * honored, because per-device multicast hashes cannot be combined.
 */
static void netdev_set_rx_mode(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct netdev_hw_addr *ha;
	int multicast = (dev->flags & IFF_ALLMULTI);

	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));

	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(priv, hw, multicast);

	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;

	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;

		/* List too big to support so turn on all multicast mode. */
		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}

		netdev_for_each_mc_addr(ha, dev) {
			if (i >= MAX_MULTICAST_LIST)
				break;
			memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
		}
		hw->multi_list_size = (u8) i;
		hw_set_grp_addr(hw);
	} else {
		/* Leave the temporary all-multicast mode entered above. */
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
		hw_clr_multicast(hw);
	}
}
/*
 * netdev_change_mtu - change the device MTU
 * @dev:     Network device.
 * @new_mtu: Requested MTU.
 *
 * Selects the receive buffer size (regular or huge-frame) matching the
 * requested MTU and records it in hw_priv->mtu.  The device must be down.
 * In multiple device interfaces mode only the primary device may change
 * the shared MTU; requests from other devices are silently accepted.
 *
 * Return 0 if successful; -EBUSY while running, -EINVAL for a bad MTU.
 */
static int netdev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int hw_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/*
	 * Cannot use different MTU in multiple device interfaces mode.
	 * (Merged the original unbraced nested ifs into one condition.)
	 */
	if (hw->dev_count > 1 && dev != hw_priv->dev)
		return 0;
	if (new_mtu < 60)
		return -EINVAL;

	if (dev->mtu != new_mtu) {
		/* Account for the Ethernet header and 4-byte CRC. */
		hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
		if (hw_mtu > MAX_RX_BUF_SIZE)
			return -EINVAL;
		if (hw_mtu > REGULAR_RX_BUF_SIZE) {
			hw->features |= RX_HUGE_FRAME;
			hw_mtu = MAX_RX_BUF_SIZE;
		} else {
			hw->features &= ~RX_HUGE_FRAME;
			hw_mtu = REGULAR_RX_BUF_SIZE;
		}
		/* Round the buffer size up to a 4-byte multiple. */
		hw_mtu = (hw_mtu + 3) & ~3;
		hw_priv->mtu = hw_mtu;
		dev->mtu = new_mtu;
	}
	return 0;
}
/**
 * netdev_ioctl - I/O control processing
 * @dev: Network device.
 * @ifr: Interface request structure.
 * @cmd: I/O control code.
 *
 * This function is used to process I/O control calls.
 *
 * Supports the MII ioctls; access is serialized by priv->proc_sem.
 *
 * Return 0 to indicate success, or a negative error code.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int rc;
	int result = 0;
	struct mii_ioctl_data *data = if_mii(ifr);

	if (down_interruptible(&priv->proc_sem))
		return -ERESTARTSYS;

	/* assume success */
	rc = 0;
	switch (cmd) {
	/* Get address of MII PHY in use. */
	case SIOCGMIIPHY:
		data->phy_id = priv->id;

		/* Fallthrough... */

	/* Read MII PHY register. */
	case SIOCGMIIREG:
		/* reg_num limit of 6: registers supported by hw_r_phy --
		 * NOTE(review): confirm against the PHY register map. */
		if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_r_phy(hw, port->linked->port_id, data->reg_num,
				&data->val_out);
		break;

	/* Write MII PHY register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			result = -EPERM;
		else if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_w_phy(hw, port->linked->port_id, data->reg_num,
				data->val_in);
		break;

	default:
		result = -EOPNOTSUPP;
	}

	up(&priv->proc_sem);

	return result;
}
/*
 * MII support
 */

/**
 * mdio_read - read PHY register
 * @dev:     Network device.
 * @phy_id:  The PHY id (unused; the linked port's PHY is always read).
 * @reg_num: The register number.
 *
 * This function returns the PHY register value.
 *
 * Return the register value.
 */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	u16 val_out;

	/* reg_num << 1: hw_r_phy takes a byte offset into the PHY space --
	 * NOTE(review): confirm against hw_r_phy's register addressing. */
	hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
	return val_out;
}
/**
 * mdio_write - set PHY register
 * @dev:     Network device.
 * @phy_id:  The PHY id (unused; all ports of the device are written).
 * @reg_num: The register number.
 * @val:     The register value.
 *
 * This procedure sets the PHY register value on every port belonging to
 * the device, so all ports stay configured identically.
 */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = port->hw;
	int i;
	int pi;

	for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
		hw_w_phy(hw, pi, reg_num << 1, val);
}
/*
 * ethtool support
 */

/* Size of the emulated EEPROM image in 16-bit words. */
#define EEPROM_SIZE			0x40

/* Cached copy of the EEPROM contents, filled on demand by reads. */
static u16 eeprom_data[EEPROM_SIZE] = { 0 };

/* All 10/100 advertisement bits handled by this driver. */
#define ADVERTISED_ALL			\
	(ADVERTISED_10baseT_Half |	\
	ADVERTISED_10baseT_Full |	\
	ADVERTISED_100baseT_Half |	\
	ADVERTISED_100baseT_Full)

/* These functions use the MII functions in mii.c. */
/**
 * netdev_get_settings - get network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function queries the PHY and returns its state in the ethtool
 * command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	mutex_lock(&hw_priv->lock);
	mii_ethtool_gset(&priv->mii_if, cmd);
	/* NOTE(review): a SUPPORTED_* flag is OR-ed into the advertising
	 * mask here -- confirm SUPPORTED_TP and ADVERTISED_TP share the
	 * same bit value. */
	cmd->advertising |= SUPPORTED_TP;
	mutex_unlock(&hw_priv->lock);

	/* Save advertised settings for workaround in next function. */
	priv->advertising = cmd->advertising;
	return 0;
}
/**
 * netdev_set_settings - set network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function sets the PHY according to the ethtool command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	u32 speed = ethtool_cmd_speed(cmd);
	int rc;

	/*
	 * ethtool utility does not change advertised setting if auto
	 * negotiation is not specified explicitly.
	 * In that case rebuild the advertisement mask from the requested
	 * speed and duplex (the saved value from netdev_get_settings is
	 * used to detect this situation).
	 */
	if (cmd->autoneg && priv->advertising == cmd->advertising) {
		cmd->advertising |= ADVERTISED_ALL;
		if (10 == speed)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_100baseT_Half);
		else if (100 == speed)
			cmd->advertising &=
				~(ADVERTISED_10baseT_Full |
				ADVERTISED_10baseT_Half);
		if (0 == cmd->duplex)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Full);
		else if (1 == cmd->duplex)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Half |
				ADVERTISED_10baseT_Half);
	}
	mutex_lock(&hw_priv->lock);
	if (cmd->autoneg &&
	    (cmd->advertising & ADVERTISED_ALL) ==
	    ADVERTISED_ALL) {
		/* Full auto-negotiation: no forced speed/duplex. */
		port->duplex = 0;
		port->speed = 0;
		port->force_link = 0;
	} else {
		port->duplex = cmd->duplex + 1;
		if (1000 != speed)
			port->speed = speed;
		if (cmd->autoneg)
			port->force_link = 0;
		else
			port->force_link = 1;
	}
	rc = mii_ethtool_sset(&priv->mii_if, cmd);
	mutex_unlock(&hw_priv->lock);
	return rc;
}
/**
 * netdev_nway_reset - restart auto-negotiation
 * @dev: Network device.
 *
 * Restarts PHY auto-negotiation under the hardware lock.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_nway_reset(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	int result;

	mutex_lock(&hw_priv->lock);
	result = mii_nway_restart(&priv->mii_if);
	mutex_unlock(&hw_priv->lock);
	return result;
}
/**
 * netdev_get_link - get network device link status
 * @dev: Network device.
 *
 * Queries the PHY for link state.
 *
 * Return true if PHY is linked and false otherwise.
 */
static u32 netdev_get_link(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);

	return mii_link_ok(&priv->mii_if);
}
/**
 * netdev_get_drvinfo - get network driver information
 * @dev:  Network device.
 * @info: Ethtool driver info data structure.
 *
 * Fills in the driver name, version and PCI bus location.
 */
static void netdev_get_drvinfo(struct net_device *dev,
	struct ethtool_drvinfo *info)
{
	struct dev_priv *priv = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(priv->adapter->pdev),
		sizeof(info->bus_info));
}
/**
 * netdev_get_regs_len - get length of register dump
 * @dev: Network device.
 *
 * This function returns the length of the register dump: 0x40 bytes of PCI
 * configuration space plus every range listed in hw_regs_range.
 *
 * Return length of the register dump.
 */

/* Hardware register ranges included in the ethtool register dump;
 * terminated by an all-zero entry. */
static struct hw_regs {
	int start;
	int end;
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL,	KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO,	KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET,	KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P,	KS8842_SGCR7_P },
	{ KS8842_MACAR1_P,	KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P,	KS8842_P3ERCR_P },
	{ 0, 0 }
};

static int netdev_get_regs_len(struct net_device *dev)
{
	struct hw_regs *range = hw_regs_range;
	/* 0x10 dwords = 0x40 bytes of PCI config space. */
	int regs_len = 0x10 * sizeof(u32);

	while (range->end > range->start) {
		/* Each range is rounded up to whole 4-byte registers. */
		regs_len += (range->end - range->start + 3) / 4 * 4;
		range++;
	}
	return regs_len;
}
/**
 * netdev_get_regs - get register dump
 * @dev:  Network device.
 * @regs: Ethtool registers data structure.
 * @ptr:  Buffer to store the register values.
 *
 * This procedure dumps the register values in the provided buffer: first
 * 0x40 bytes of PCI configuration space, then every range in
 * hw_regs_range.  Layout must match netdev_get_regs_len().
 */
static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
	void *ptr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int *buf = (int *) ptr;
	struct hw_regs *range = hw_regs_range;
	int len;

	mutex_lock(&hw_priv->lock);
	regs->version = 0;
	for (len = 0; len < 0x40; len += 4) {
		pci_read_config_dword(hw_priv->pdev, len, buf);
		buf++;
	}
	while (range->end > range->start) {
		for (len = range->start; len < range->end; len += 4) {
			*buf = readl(hw->io + len);
			buf++;
		}
		range++;
	}
	mutex_unlock(&hw_priv->lock);
}
/* Wake-on-LAN events the hardware can wake on. */
#define WOL_SUPPORT			\
	(WAKE_PHY | WAKE_MAGIC |	\
	WAKE_UCAST | WAKE_MCAST |	\
	WAKE_BCAST | WAKE_ARP)
/**
 * netdev_get_wol - get Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * Reports the supported and currently enabled wake events.  No SecureOn
 * password is supported, so sopass is cleared.
 */
static void netdev_get_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *adapter = priv->adapter;

	wol->supported = adapter->wol_support;
	wol->wolopts = adapter->wol_enable;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/**
 * netdev_set_wol - set Wake-on-LAN support
 * @dev: Network device.
 * @wol: Ethtool Wake-on-LAN data structure.
 *
 * This function sets Wake-on-LAN support.
 *
 * Return 0 if successful; -EINVAL for unsupported wake options.
 */
static int netdev_set_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	/* Need to find a way to retrieve the device IP address. */
	/* NOTE(review): the ARP wake pattern is built from this hard-coded
	 * placeholder address, not the interface's real IP. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };

	if (wol->wolopts & ~hw_priv->wol_support)
		return -EINVAL;

	hw_priv->wol_enable = wol->wolopts;

	/* Link wakeup cannot really be disabled. */
	if (wol->wolopts)
		hw_priv->wol_enable |= WAKE_PHY;
	hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
	return 0;
}
/**
 * netdev_get_msglevel - get debug message level
 * @dev: Network device.
 *
 * Returns the current debug message flags.
 *
 * Return current debug message flags.
 */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
/**
 * netdev_set_msglevel - set debug message level
 * @dev:   Network device.
 * @value: Debug message flags.
 *
 * Stores the new debug message flags.
 */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct dev_priv *priv = netdev_priv(dev);

	priv->msg_enable = value;
}
/**
 * netdev_get_eeprom_len - get EEPROM length
 * @dev: Network device.
 *
 * Reports the EEPROM size in bytes (EEPROM_SIZE is in 16-bit words).
 *
 * Return length of the EEPROM.
 */
static int netdev_get_eeprom_len(struct net_device *dev)
{
	return 2 * EEPROM_SIZE;
}
/**
 * netdev_get_eeprom - get EEPROM data
 * @dev:    Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data:   Buffer to store the EEPROM data.
 *
 * This function dumps the EEPROM data in the provided buffer.  The
 * requested words are read fresh from hardware into the cached image
 * first; byte-granular offsets are served from that cache.
 *
 * Return 0 if successful; otherwise an error code.
 */
#define EEPROM_MAGIC			0x10A18842

static int netdev_get_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u8 *eeprom_byte = (u8 *) eeprom_data;
	int i;
	int len;

	/* Word range covering the requested byte range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	eeprom->magic = EEPROM_MAGIC;
	memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);

	return 0;
}
/**
 * netdev_set_eeprom - write EEPROM data
 * @dev:    Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data:   Data buffer.
 *
 * This function modifies the EEPROM data one byte at a time: the affected
 * words are re-read from hardware, the new bytes are merged in, and only
 * words that actually changed are written back.
 *
 * Return 0 if successful; -EINVAL on magic mismatch.
 */
static int netdev_set_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u16 eeprom_word[EEPROM_SIZE];
	u8 *eeprom_byte = (u8 *) eeprom_word;
	int i;
	int len;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Refresh the cached words covering the byte range to be written. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
	memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);

	/* Only write back words that actually changed. */
	for (i = 0; i < EEPROM_SIZE; i++)
		if (eeprom_word[i] != eeprom_data[i]) {
			eeprom_data[i] = eeprom_word[i];
			eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
		}

	return 0;
}
/**
 * netdev_get_pauseparam - get flow control parameters
 * @dev:   Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This procedure returns the PAUSE control flow settings.  With a switch
 * present the flow-control state lives in the switch control register;
 * otherwise it is read from the DMA rx/tx configuration.
 */
static void netdev_get_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	/* PAUSE_FLOW_CTRL override set means flow control was forced. */
	pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
	if (!hw->ksz_switch) {
		pause->rx_pause =
			(hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
		pause->tx_pause =
			(hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
	} else {
		pause->rx_pause =
			(sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
		pause->tx_pause =
			(sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
	}
}
/**
 * netdev_set_pauseparam - set flow control parameters
 * @dev:   Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This function sets the PAUSE control flow settings.
 * Not implemented yet.
 *
 * With autoneg the requested rx/tx pause only selects the PHY flow-control
 * advertisement and link speed is re-negotiated; without autoneg the
 * settings are forced directly into the switch or DMA configuration.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, 1);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, 1);
		}
		port_set_link_speed(port);
	} else {
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);

	return 0;
}
/**
 * netdev_get_ringparam - get tx/rx ring parameters
 * @dev:  Network device.
 * @ring: Ethtool RING settings data structure.
 *
 * Reports the allocated and maximum TX/RX descriptor ring sizes.
 */
static void netdev_get_ringparam(struct net_device *dev,
	struct ethtool_ringparam *ring)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_hw *hw = &priv->adapter->hw;

	ring->tx_max_pending = 1 << 9;
	ring->tx_pending = hw->tx_desc_info.alloc;
	ring->rx_max_pending = 1 << 9;
	ring->rx_pending = hw->rx_desc_info.alloc;
}
#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)

/*
 * Identity strings for the ethtool statistics, in the same order as the
 * hardware MIB counters read by get_mib_counters().  Only the first
 * hw->mib_cnt entries apply to a given chip variant.
 */
static struct {
    char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
    { "rx_lo_priority_octets" },
    { "rx_hi_priority_octets" },
    { "rx_undersize_packets" },
    { "rx_fragments" },
    { "rx_oversize_packets" },
    { "rx_jabbers" },
    { "rx_symbol_errors" },
    { "rx_crc_errors" },
    { "rx_align_errors" },
    { "rx_mac_ctrl_packets" },
    { "rx_pause_packets" },
    { "rx_bcast_packets" },
    { "rx_mcast_packets" },
    { "rx_ucast_packets" },
    { "rx_64_or_less_octet_packets" },
    { "rx_65_to_127_octet_packets" },
    { "rx_128_to_255_octet_packets" },
    { "rx_256_to_511_octet_packets" },
    { "rx_512_to_1023_octet_packets" },
    { "rx_1024_to_1522_octet_packets" },
    { "tx_lo_priority_octets" },
    { "tx_hi_priority_octets" },
    { "tx_late_collisions" },
    { "tx_pause_packets" },
    { "tx_bcast_packets" },
    { "tx_mcast_packets" },
    { "tx_ucast_packets" },
    { "tx_deferred" },
    { "tx_total_collisions" },
    { "tx_excessive_collisions" },
    { "tx_single_collisions" },
    { "tx_mult_collisions" },
    { "rx_discards" },
    { "tx_discards" },
};
/**
 * netdev_get_strings - get statistics identity strings
 * @dev: Network device.
 * @stringset: String set identifier.
 * @buf: Buffer to store the strings.
 *
 * This procedure returns the strings used to identify the statistics.
 */
static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
    struct dev_priv *priv = netdev_priv(dev);
    struct dev_info *hw_priv = priv->adapter;
    struct ksz_hw *hw = &hw_priv->hw;

    /* Only the first hw->mib_cnt keys apply to this chip variant. */
    if (ETH_SS_STATS == stringset)
        memcpy(buf, &ethtool_stats_keys,
            ETH_GSTRING_LEN * hw->mib_cnt);
}
/**
 * netdev_get_sset_count - get statistics size
 * @dev: Network device.
 * @sset: The statistics set number.
 *
 * This function returns the number of statistics exposed for the given
 * string set; only ETH_SS_STATS is supported.
 *
 * Return size of the statistics to be reported.
 */
static int netdev_get_sset_count(struct net_device *dev, int sset)
{
    struct dev_priv *priv = netdev_priv(dev);
    struct dev_info *hw_priv = priv->adapter;
    struct ksz_hw *hw = &hw_priv->hw;

    /* No other string set is implemented by this driver. */
    if (ETH_SS_STATS != sset)
        return -EOPNOTSUPP;

    return hw->mib_cnt;
}
/**
 * netdev_get_ethtool_stats - get network device statistics
 * @dev: Network device.
 * @stats: Ethtool statistics data structure.
 * @data: Buffer to store the statistics.
 *
 * This procedure returns the statistics.  It requests a MIB counter read
 * for each connected port, kicks the mib_read work item, waits for that
 * work to signal completion (counter[].read == 2), then copies the
 * accumulated counters to the caller's buffer.
 */
static void netdev_get_ethtool_stats(struct net_device *dev,
    struct ethtool_stats *stats, u64 *data)
{
    struct dev_priv *priv = netdev_priv(dev);
    struct dev_info *hw_priv = priv->adapter;
    struct ksz_hw *hw = &hw_priv->hw;
    struct ksz_port *port = &priv->port;
    int n_stats = stats->n_stats;
    int i;
    int n;
    int p;
    int rc;
    u64 counter[TOTAL_PORT_COUNTER_NUM];

    /* Mark connected ports as wanting a counter read. */
    mutex_lock(&hw_priv->lock);
    n = SWITCH_PORT_NUM;
    for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
        if (media_connected == hw->port_mib[p].state) {
            hw_priv->counter[p].read = 1;

            /* Remember first port that requests read. */
            if (n == SWITCH_PORT_NUM)
                n = p;
        }
    }
    mutex_unlock(&hw_priv->lock);

    /* n < SWITCH_PORT_NUM means at least one port asked for a read. */
    if (n < SWITCH_PORT_NUM)
        schedule_work(&hw_priv->mib_read);

    /*
     * Wait (with timeout) for mib_read_work to finish; read == 2 marks a
     * completed read for a port.  rc is intentionally unused: on timeout
     * the last snapshot of the counters is reported anyway.
     */
    if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
        p = n;
        rc = wait_event_interruptible_timeout(
            hw_priv->counter[p].counter,
            2 == hw_priv->counter[p].read,
            HZ * 1);
    } else
        for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
            if (0 == i) {
                rc = wait_event_interruptible_timeout(
                    hw_priv->counter[p].counter,
                    2 == hw_priv->counter[p].read,
                    HZ * 2);
            } else if (hw->port_mib[p].cnt_ptr) {
                rc = wait_event_interruptible_timeout(
                    hw_priv->counter[p].counter,
                    2 == hw_priv->counter[p].read,
                    HZ * 1);
            }
        }

    /* Copy out at most n_stats counters. */
    get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
    n = hw->mib_cnt;
    if (n > n_stats)
        n = n_stats;
    n_stats -= n;
    for (i = 0; i < n; i++)
        *data++ = counter[i];
}
/**
 * netdev_set_features - set receive checksum support
 * @dev: Network device.
 * @features: New device features (offloads).
 *
 * This function sets receive checksum support setting by updating the
 * DMA receive control register to match the NETIF_F_RXCSUM flag.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_features(struct net_device *dev,
    netdev_features_t features)
{
    struct dev_priv *priv = netdev_priv(dev);
    struct dev_info *hw_priv = priv->adapter;
    struct ksz_hw *hw = &hw_priv->hw;

    mutex_lock(&hw_priv->lock);

    /* see note in hw_setup() */
    if (features & NETIF_F_RXCSUM)
        hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
    else
        hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

    /* Only touch the hardware register while the device is enabled. */
    if (hw->enabled)
        writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

    mutex_unlock(&hw_priv->lock);
    return 0;
}
/* ethtool operations supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
    .get_settings = netdev_get_settings,
    .set_settings = netdev_set_settings,
    .nway_reset = netdev_nway_reset,
    .get_link = netdev_get_link,
    .get_drvinfo = netdev_get_drvinfo,
    .get_regs_len = netdev_get_regs_len,
    .get_regs = netdev_get_regs,
    .get_wol = netdev_get_wol,
    .set_wol = netdev_set_wol,
    .get_msglevel = netdev_get_msglevel,
    .set_msglevel = netdev_set_msglevel,
    .get_eeprom_len = netdev_get_eeprom_len,
    .get_eeprom = netdev_get_eeprom,
    .set_eeprom = netdev_set_eeprom,
    .get_pauseparam = netdev_get_pauseparam,
    .set_pauseparam = netdev_set_pauseparam,
    .get_ringparam = netdev_get_ringparam,
    .get_strings = netdev_get_strings,
    .get_sset_count = netdev_get_sset_count,
    .get_ethtool_stats = netdev_get_ethtool_stats,
};
/*
 * Hardware monitoring
 */

/*
 * update_link - propagate a link state change to the network stack.
 * Caches the linked port's state in priv->media_state and, if the
 * interface is up, notifies the stack via set_media_state().
 * NOTE(review): media_connected is passed unconditionally here;
 * presumably set_media_state() compares it against priv->media_state to
 * decide carrier on/off -- confirm against its definition.
 */
static void update_link(struct net_device *dev, struct dev_priv *priv,
    struct ksz_port *port)
{
    if (priv->media_state != port->linked->state) {
        priv->media_state = port->linked->state;

        if (netif_running(dev))
            set_media_state(dev, media_connected);
    }
}
/*
 * mib_read_work - work item that harvests hardware MIB counters.
 * For each port it either continues/starts a counter read (read == 1 or a
 * read already in progress via mib->cnt_ptr), schedules the next periodic
 * read for connected ports, or performs one final read after link loss.
 * A finished read is signalled with read == 2 and a wakeup on the port's
 * wait queue (see netdev_get_ethtool_stats).
 * Note: next_jiffies is a file-scope variable defined elsewhere in this
 * file.
 */
static void mib_read_work(struct work_struct *work)
{
    struct dev_info *hw_priv =
        container_of(work, struct dev_info, mib_read);
    struct ksz_hw *hw = &hw_priv->hw;
    struct ksz_port_mib *mib;
    int i;

    next_jiffies = jiffies;
    for (i = 0; i < hw->mib_port_cnt; i++) {
        mib = &hw->port_mib[i];

        /* Reading MIB counters or requested to read. */
        if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

            /* Need to process receive interrupt. */
            if (port_r_cnt(hw, i))
                break;
            hw_priv->counter[i].read = 0;

            /* Finish reading counters. */
            if (0 == mib->cnt_ptr) {
                hw_priv->counter[i].read = 2;
                wake_up_interruptible(
                    &hw_priv->counter[i].counter);
            }
        } else if (jiffies >= hw_priv->counter[i].time) {
            /* Only read MIB counters when the port is connected. */
            if (media_connected == mib->state)
                hw_priv->counter[i].read = 1;
            next_jiffies += HZ * 1 * hw->mib_port_cnt;
            hw_priv->counter[i].time = next_jiffies;

        /* Port is just disconnected. */
        } else if (mib->link_down) {
            mib->link_down = 0;

            /* Read counters one last time after link is lost. */
            hw_priv->counter[i].read = 1;
        }
    }
}
/*
 * mib_monitor - periodic timer callback for MIB counter reads and
 * Wake-on-LAN PME status handling.  Runs mib_read_work() directly and
 * re-arms itself via ksz_update_timer().
 */
static void mib_monitor(unsigned long ptr)
{
    struct dev_info *hw_priv = (struct dev_info *) ptr;

    mib_read_work(&hw_priv->mib_read);

    /* This is used to verify Wake-on-LAN is working. */
    if (hw_priv->pme_wait) {
        if (hw_priv->pme_wait <= jiffies) {
            hw_clr_wol_pme_status(&hw_priv->hw);
            hw_priv->pme_wait = 0;
        }
    } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {

        /* PME is asserted.  Wait 2 seconds to clear it. */
        hw_priv->pme_wait = jiffies + HZ * 2;
    }

    ksz_update_timer(&hw_priv->mib_timer_info);
}
/**
 * dev_monitor - periodic monitoring
 * @ptr: Network device pointer.
 *
 * This routine is run in a kernel timer to monitor the network device.
 * If the hardware does not report link changes via interrupt, it polls
 * the link speed, then propagates any state change to the stack.
 */
static void dev_monitor(unsigned long ptr)
{
    struct net_device *dev = (struct net_device *) ptr;
    struct dev_priv *priv = netdev_priv(dev);
    struct dev_info *hw_priv = priv->adapter;
    struct ksz_hw *hw = &hw_priv->hw;
    struct ksz_port *port = &priv->port;

    /* Poll the link only when the link interrupt is not usable. */
    if (!(hw->features & LINK_INT_WORKING))
        port_get_link_speed(port);
    update_link(dev, priv, port);

    ksz_update_timer(&priv->monitor_timer_info);
}
/*
* Linux network device interface functions
*/
/* Driver exported variables */
static int msg_enable;
static char *macaddr = ":";
static char *mac1addr = ":";
/*
* This enables multiple network device mode for KSZ8842, which contains a
* switch with two physical ports. Some users like to take control of the
* ports for running Spanning Tree Protocol. The driver will create an
* additional eth? device for the other port.
*
* Some limitations are the network devices cannot have different MTU and
* multicast hash tables.
*/
static int multi_dev;
/*
* As most users select multiple network device mode to use Spanning Tree
* Protocol, this enables a feature in which most unicast and multicast packets
* are forwarded inside the switch and not passed to the host. Only packets
* that need the host's attention are passed to it. This prevents the host
* wasting CPU time to examine each and every incoming packets and do the
* forwarding itself.
*
* As the hack requires the private bridge header, the driver cannot compile
* with just the kernel headers.
*
* Enabling STP support also turns on multiple network device mode.
*/
static int stp;
/*
* This enables fast aging in the KSZ8842 switch. Not sure what situation
* needs that. However, fast aging is used to flush the dynamic MAC table when
* STP suport is enabled.
*/
static int fast_aging;
/**
 * netdev_init - initialize network device.
 * @dev: Network device.
 *
 * This function initializes the network device: link monitor timer,
 * watchdog timeout, offload feature flags, and the MII interface used by
 * the ethtool/ioctl paths.
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int __init netdev_init(struct net_device *dev)
{
    struct dev_priv *priv = netdev_priv(dev);

    /* 500 ms timeout */
    ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
        dev_monitor, dev);

    /* 500 ms timeout */
    dev->watchdog_timeo = HZ / 2;

    dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;

    /*
     * Hardware does not really support IPv6 checksum generation, but
     * driver actually runs faster with this on.
     */
    dev->hw_features |= NETIF_F_IPV6_CSUM;

    dev->features |= dev->hw_features;

    sema_init(&priv->proc_sem, 1);

    /* PHY ids are 1-based; first_port is 0-based. */
    priv->mii_if.phy_id_mask = 0x1;
    priv->mii_if.reg_num_mask = 0x7;
    priv->mii_if.dev = dev;
    priv->mii_if.mdio_read = mdio_read;
    priv->mii_if.mdio_write = mdio_write;
    priv->mii_if.phy_id = priv->port.first_port + 1;

    priv->msg_enable = netif_msg_init(msg_enable,
        (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));

    return 0;
}
/* Network device operations supported by this driver. */
static const struct net_device_ops netdev_ops = {
    .ndo_init = netdev_init,
    .ndo_open = netdev_open,
    .ndo_stop = netdev_close,
    .ndo_get_stats = netdev_query_statistics,
    .ndo_start_xmit = netdev_tx,
    .ndo_tx_timeout = netdev_tx_timeout,
    .ndo_change_mtu = netdev_change_mtu,
    .ndo_set_features = netdev_set_features,
    .ndo_set_mac_address = netdev_set_mac_address,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_do_ioctl = netdev_ioctl,
    .ndo_set_rx_mode = netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = netdev_netpoll,
#endif
};
/*
 * netdev_free - unregister (if needed) and free a network device.
 * A non-zero watchdog_timeo is used as the "was registered" marker here:
 * it is set in netdev_init(), which runs during register_netdev() --
 * devices that failed before registration still have it at zero.
 */
static void netdev_free(struct net_device *dev)
{
    if (dev->watchdog_timeo)
        unregister_netdev(dev);

    free_netdev(dev);
}
/* Per-PCI-device driver data: shared hardware info plus its netdevs. */
struct platform_info {
    struct dev_info dev_info;
    struct net_device *netdev[SWITCH_PORT_NUM];
};

/* Number of network devices created so far; also used as the device id. */
static int net_device_present;
/*
 * get_mac_addr - parse a "xx:xx:xx:xx:xx:xx" module-parameter string into
 * the override MAC address for the given port (MAIN_PORT -> override_addr,
 * otherwise -> ksz_switch->other_addr).  Parsing stops at the first
 * non-hex, non-':' character; a byte is committed when a ':' or the string
 * end is reached (got_num == 2).  Only a fully parsed address (all
 * ETH_ALEN bytes) sets hw.mac_override.
 * NOTE(review): byte 5 is incremented by hw.id each time ANY byte is
 * committed, i.e. up to six times per parse -- looks intentional upstream
 * but verify before relying on the resulting address.
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
    int i;
    int j;
    int got_num;
    int num;

    i = j = num = got_num = 0;
    while (j < ETH_ALEN) {
        if (macaddr[i]) {
            int digit;

            got_num = 1;
            digit = hex_to_bin(macaddr[i]);
            if (digit >= 0)
                num = num * 16 + digit;
            else if (':' == macaddr[i])
                got_num = 2;
            else
                break;
        } else if (got_num)
            got_num = 2;	/* end of string commits the last byte */
        else
            break;
        if (2 == got_num) {
            if (MAIN_PORT == port) {
                hw_priv->hw.override_addr[j++] = (u8) num;
                hw_priv->hw.override_addr[5] +=
                    hw_priv->hw.id;
            } else {
                hw_priv->hw.ksz_switch->other_addr[j++] =
                    (u8) num;
                hw_priv->hw.ksz_switch->other_addr[5] +=
                    hw_priv->hw.id;
            }
            num = got_num = 0;
        }
        i++;
    }
    if (ETH_ALEN == j) {
        if (MAIN_PORT == port)
            hw_priv->hw.mac_override = 1;
    }
}
#define KS884X_DMA_MASK (~0x0UL)
static void read_other_addr(struct ksz_hw *hw)
{
int i;
u16 data[3];
struct ksz_switch *sw = hw->ksz_switch;
for (i = 0; i < 3; i++)
data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
sw->other_addr[5] = (u8) data[0];
sw->other_addr[4] = (u8)(data[0] >> 8);
sw->other_addr[3] = (u8) data[1];
sw->other_addr[2] = (u8)(data[1] >> 8);
sw->other_addr[1] = (u8) data[2];
sw->other_addr[0] = (u8)(data[2] >> 8);
}
}
#ifndef PCI_VENDOR_ID_MICREL_KS
#define PCI_VENDOR_ID_MICREL_KS		0x16c6
#endif

/*
 * pcidev_init - PCI probe routine.
 * Maps the register window, detects KSZ8841 (one port) vs KSZ8842
 * (two-port switch), allocates driver memory, and registers one or more
 * network devices depending on the multi_dev setting.  Error paths unwind
 * in reverse order through the cascading labels at the bottom.
 */
static int __devinit pcidev_init(struct pci_dev *pdev,
    const struct pci_device_id *id)
{
    struct net_device *dev;
    struct dev_priv *priv;
    struct dev_info *hw_priv;
    struct ksz_hw *hw;
    struct platform_info *info;
    struct ksz_port *port;
    unsigned long reg_base;
    unsigned long reg_len;
    int cnt;
    int i;
    int mib_port_count;
    int pi;
    int port_count;
    int result;
    char banner[sizeof(version)];
    struct ksz_switch *sw = NULL;

    result = pci_enable_device(pdev);
    if (result)
        return result;

    result = -ENODEV;

    /* Hardware is a 32-bit DMA device. */
    if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
            pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
        return result;

    /* BAR 0 must be memory-mapped, not I/O ports. */
    reg_base = pci_resource_start(pdev, 0);
    reg_len = pci_resource_len(pdev, 0);
    if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
        return result;

    if (!request_mem_region(reg_base, reg_len, DRV_NAME))
        return result;
    pci_set_master(pdev);

    result = -ENOMEM;

    info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
    if (!info)
        goto pcidev_init_dev_err;

    hw_priv = &info->dev_info;
    hw_priv->pdev = pdev;

    hw = &hw_priv->hw;

    hw->io = ioremap(reg_base, reg_len);
    if (!hw->io)
        goto pcidev_init_io_err;

    /* hw_init() returns the number of ports, 0 if no chip found. */
    cnt = hw_init(hw);
    if (!cnt) {
        if (msg_enable & NETIF_MSG_PROBE)
            pr_alert("chip not detected\n");
        result = -ENODEV;
        goto pcidev_init_alloc_err;
    }

    snprintf(banner, sizeof(banner), "%s", version);
    banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
    dev_info(&hw_priv->pdev->dev, "%s\n", banner);
    dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

    /* Assume device is KSZ8841. */
    hw->dev_count = 1;
    port_count = 1;
    mib_port_count = 1;
    hw->addr_list_size = 0;
    hw->mib_cnt = PORT_COUNTER_NUM;
    hw->mib_port_cnt = 1;

    /* KSZ8842 has a switch with multiple ports. */
    if (2 == cnt) {
        if (fast_aging)
            hw->overrides |= FAST_AGING;

        hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

        /* Multiple network device interfaces are required. */
        if (multi_dev) {
            hw->dev_count = SWITCH_PORT_NUM;
            hw->addr_list_size = SWITCH_PORT_NUM - 1;
        }

        /* Single network device has multiple ports. */
        if (1 == hw->dev_count) {
            port_count = SWITCH_PORT_NUM;
            mib_port_count = SWITCH_PORT_NUM;
        }
        hw->mib_port_cnt = TOTAL_PORT_NUM;
        hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
        if (!hw->ksz_switch)
            goto pcidev_init_alloc_err;

        sw = hw->ksz_switch;
    }
    for (i = 0; i < hw->mib_port_cnt; i++)
        hw->port_mib[i].mib_start = 0;

    hw->parent = hw_priv;

    /* Default MTU is 1500. */
    hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

    if (ksz_alloc_mem(hw_priv))
        goto pcidev_init_mem_err;

    hw_priv->hw.id = net_device_present;

    spin_lock_init(&hw_priv->hwlock);
    mutex_init(&hw_priv->lock);

    /* tasklet is enabled. */
    tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
        (unsigned long) hw_priv);
    tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
        (unsigned long) hw_priv);

    /* tasklet_enable will decrement the atomic counter. */
    tasklet_disable(&hw_priv->rx_tasklet);
    tasklet_disable(&hw_priv->tx_tasklet);

    for (i = 0; i < TOTAL_PORT_NUM; i++)
        init_waitqueue_head(&hw_priv->counter[i].counter);

    /* Apply user-supplied MAC address override, if any. */
    if (macaddr[0] != ':')
        get_mac_addr(hw_priv, macaddr, MAIN_PORT);

    /* Read MAC address and initialize override address if not overridden. */
    hw_read_addr(hw);

    /* Multiple device interfaces mode requires a second MAC address. */
    if (hw->dev_count > 1) {
        memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
        read_other_addr(hw);
        if (mac1addr[0] != ':')
            get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
    }

    hw_setup(hw);
    if (hw->ksz_switch)
        sw_setup(hw);
    else {
        hw_priv->wol_support = WOL_SUPPORT;
        hw_priv->wol_enable = 0;
    }

    INIT_WORK(&hw_priv->mib_read, mib_read_work);

    /* 500 ms timeout */
    ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
        mib_monitor, hw_priv);

    for (i = 0; i < hw->dev_count; i++) {
        dev = alloc_etherdev(sizeof(struct dev_priv));
        if (!dev)
            goto pcidev_init_reg_err;
        info->netdev[i] = dev;

        priv = netdev_priv(dev);
        priv->adapter = hw_priv;
        priv->id = net_device_present++;

        port = &priv->port;
        port->port_cnt = port_count;
        port->mib_port_cnt = mib_port_count;
        port->first_port = i;
        port->flow_ctrl = PHY_FLOW_CTRL;

        port->hw = hw;
        port->linked = &hw->port_info[port->first_port];

        for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
            hw->port_info[pi].port_id = pi;
            hw->port_info[pi].pdev = dev;
            hw->port_info[pi].state = media_disconnected;
        }

        dev->mem_start = (unsigned long) hw->io;
        dev->mem_end = dev->mem_start + reg_len - 1;
        dev->irq = pdev->irq;
        if (MAIN_PORT == i)
            memcpy(dev->dev_addr, hw_priv->hw.override_addr,
                ETH_ALEN);
        else {
            memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);

            /* If no distinct second address, derive one. */
            if (!memcmp(sw->other_addr, hw->override_addr,
                    ETH_ALEN))
                dev->dev_addr[5] += port->first_port;
        }

        dev->netdev_ops = &netdev_ops;
        SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
        if (register_netdev(dev))
            goto pcidev_init_reg_err;
        port_set_power_saving(port, true);
    }

    pci_dev_get(hw_priv->pdev);
    pci_set_drvdata(pdev, info);
    return 0;

pcidev_init_reg_err:
    for (i = 0; i < hw->dev_count; i++) {
        if (info->netdev[i]) {
            netdev_free(info->netdev[i]);
            info->netdev[i] = NULL;
        }
    }

pcidev_init_mem_err:
    ksz_free_mem(hw_priv);
    kfree(hw->ksz_switch);

pcidev_init_alloc_err:
    iounmap(hw->io);

pcidev_init_io_err:
    kfree(info);

pcidev_init_dev_err:
    release_mem_region(reg_base, reg_len);

    return result;
}
/*
 * pcidev_exit - PCI remove routine.
 * Unwinds pcidev_init(): releases the register window, frees all network
 * devices, driver memory, the optional switch structure, and drops the
 * pci_dev reference taken at probe time.
 */
static void pcidev_exit(struct pci_dev *pdev)
{
    int i;
    struct platform_info *info = pci_get_drvdata(pdev);
    struct dev_info *hw_priv = &info->dev_info;

    pci_set_drvdata(pdev, NULL);

    release_mem_region(pci_resource_start(pdev, 0),
        pci_resource_len(pdev, 0));
    for (i = 0; i < hw_priv->hw.dev_count; i++) {
        if (info->netdev[i])
            netdev_free(info->netdev[i]);
    }
    if (hw_priv->hw.io)
        iounmap(hw_priv->hw.io);
    ksz_free_mem(hw_priv);
    kfree(hw_priv->hw.ksz_switch);
    pci_dev_put(hw_priv->pdev);
    kfree(info);
}
#ifdef CONFIG_PM
/*
 * pcidev_resume - PCI resume callback.
 * Restores PCI state, disables wake events, clears any Wake-on-LAN PME
 * configuration, and reopens every interface that was running at suspend.
 */
static int pcidev_resume(struct pci_dev *pdev)
{
    int i;
    struct platform_info *info = pci_get_drvdata(pdev);
    struct dev_info *hw_priv = &info->dev_info;
    struct ksz_hw *hw = &hw_priv->hw;

    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);
    pci_enable_wake(pdev, PCI_D0, 0);

    if (hw_priv->wol_enable)
        hw_cfg_wol_pme(hw, 0);
    for (i = 0; i < hw->dev_count; i++) {
        if (info->netdev[i]) {
            struct net_device *dev = info->netdev[i];

            if (netif_running(dev)) {
                netdev_open(dev);
                netif_device_attach(dev);
            }
        }
    }
    return 0;
}
/*
 * pcidev_suspend - PCI suspend callback.
 * Closes every running interface, programs Wake-on-LAN (with a hard-coded
 * IP address -- see the comment below), then puts the device into the
 * requested low-power state with wake events enabled.
 */
static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
{
    int i;
    struct platform_info *info = pci_get_drvdata(pdev);
    struct dev_info *hw_priv = &info->dev_info;
    struct ksz_hw *hw = &hw_priv->hw;

    /* Need to find a way to retrieve the device IP address. */
    static const u8 net_addr[] = { 192, 168, 1, 1 };

    for (i = 0; i < hw->dev_count; i++) {
        if (info->netdev[i]) {
            struct net_device *dev = info->netdev[i];

            if (netif_running(dev)) {
                netif_device_detach(dev);
                netdev_close(dev);
            }
        }
    }
    if (hw_priv->wol_enable) {
        hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
        hw_cfg_wol_pme(hw, 1);
    }

    pci_save_state(pdev);
    pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
    pci_set_power_state(pdev, pci_choose_state(pdev, state));
    return 0;
}
#endif
static char pcidev_name[] = "ksz884xp";

/* PCI IDs: Micrel KSZ8841 (single port) and KSZ8842 (two-port switch). */
static struct pci_device_id pcidev_table[] = {
    { PCI_VENDOR_ID_MICREL_KS, 0x8841,
        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
    { PCI_VENDOR_ID_MICREL_KS, 0x8842,
        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
    { 0 }
};

MODULE_DEVICE_TABLE(pci, pcidev_table);

static struct pci_driver pci_device_driver = {
#ifdef CONFIG_PM
    .suspend	= pcidev_suspend,
    .resume		= pcidev_resume,
#endif
    .name		= pcidev_name,
    .id_table	= pcidev_table,
    .probe		= pcidev_init,
    .remove		= pcidev_exit
};
/* Module entry point: register the PCI driver. */
static int __init ksz884x_init_module(void)
{
    return pci_register_driver(&pci_device_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit ksz884x_cleanup_module(void)
{
    pci_unregister_driver(&pci_device_driver);
}

module_init(ksz884x_init_module);
module_exit(ksz884x_cleanup_module);
MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");

/* Module parameters; see the comments on the backing variables above. */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");
| gpl-2.0 |
DirtyUnicorns/android_kernel_htc_pyramid | arch/arm/mach-omap2/vc.c | 4836 | 10399 | /*
* OMAP Voltage Controller (VC) interface
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <plat/cpu.h>
#include "voltage.h"
#include "vc.h"
#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"
/**
 * struct omap_vc_channel_cfg - describe the cfg_channel bitfield
 * @sa: bit for slave address
 * @rav: bit for voltage configuration register
 * @rac: bit for command configuration register
 * @racen: enable bit for RAC
 * @cmd: bit for command value set selection
 *
 * Channel configuration bits, common for OMAP3+
 * OMAP3 register: PRM_VC_CH_CONF
 * OMAP4 register: PRM_VC_CFG_CHANNEL
 * OMAP5 register: PRM_VC_SMPS_<voltdm>_CONFIG
 */
struct omap_vc_channel_cfg {
    u8 sa;
    u8 rav;
    u8 rac;
    u8 racen;
    u8 cmd;
};
/* Default bit layout, used by every OMAP3+ VC channel except OMAP4 MPU. */
static struct omap_vc_channel_cfg vc_default_channel_cfg = {
    .sa    = BIT(0),
    .rav   = BIT(1),
    .rac   = BIT(2),
    .racen = BIT(3),
    .cmd   = BIT(4),
};
/*
 * On OMAP3+, all VC channels have the above default bitfield
 * configuration, except the OMAP4 MPU channel.  This appears
 * to be a freak accident as every other VC channel has the
 * default configuration, thus creating a mutant channel config.
 */
static struct omap_vc_channel_cfg vc_mutant_channel_cfg = {
    .sa    = BIT(0),
    .rav   = BIT(2),
    .rac   = BIT(3),
    .racen = BIT(4),
    .cmd   = BIT(1),
};

/* Points at the bit layout in effect; selected in omap_vc_init_channel(). */
static struct omap_vc_channel_cfg *vc_cfg_bits;

/* Width of the cfg_channel field in the channel configuration register. */
#define CFG_CHANNEL_MASK 0x1f
/**
 * omap_vc_config_channel - configure VC channel to PMIC mappings
 * @voltdm: pointer to voltagdomain defining the desired VC channel
 *
 * Configures the VC channel to PMIC mappings for the following
 * PMIC settings
 * - i2c slave address (SA)
 * - voltage configuration address (RAV)
 * - command configuration address (RAC) and enable bit (RACEN)
 * - command values for ON, ONLP, RET and OFF (CMD)
 *
 * This function currently only allows flexible configuration of the
 * non-default channel.  Starting with OMAP4, there are more than 2
 * channels, with one defined as the default (on OMAP4, it's MPU.)
 * Only the non-default channel can be configured.
 */
static int omap_vc_config_channel(struct voltagedomain *voltdm)
{
    struct omap_vc_channel *vc = voltdm->vc;

    /*
     * For default channel, the only configurable bit is RACEN.
     * All others must stay at zero (see function comment above.)
     */
    if (vc->flags & OMAP_VC_CHANNEL_DEFAULT)
        vc->cfg_channel &= vc_cfg_bits->racen;

    voltdm->rmw(CFG_CHANNEL_MASK << vc->cfg_channel_sa_shift,
            vc->cfg_channel << vc->cfg_channel_sa_shift,
            vc->cfg_channel_reg);

    return 0;
}
/* Voltage scale and accessory APIs */

/*
 * omap_vc_pre_scale - prepare a voltage-scaling operation.
 * Validates that the domain has the required PMIC conversion hook and
 * register accessors, converts the target and current (nominal) voltages
 * to PMIC vsel values, programs the new ON command voltage, and updates
 * the voltage-processor error gain.  Returns 0 on success or a negative
 * error code.
 */
int omap_vc_pre_scale(struct voltagedomain *voltdm,
              unsigned long target_volt,
              u8 *target_vsel, u8 *current_vsel)
{
    struct omap_vc_channel *vc = voltdm->vc;
    u32 vc_cmdval;

    /* Check if sufficient pmic info is available for this vdd */
    if (!voltdm->pmic) {
        pr_err("%s: Insufficient pmic info to scale the vdd_%s\n",
            __func__, voltdm->name);
        return -EINVAL;
    }

    /*
     * Fixed: the concatenated message fragments lacked separating
     * spaces ("...uV tovsel..." / "...voltagefor vdd_...").
     */
    if (!voltdm->pmic->uv_to_vsel) {
        pr_err("%s: PMIC function to convert voltage in uV to "
            "vsel not registered. Hence unable to scale voltage "
            "for vdd_%s\n", __func__, voltdm->name);
        return -ENODATA;
    }

    if (!voltdm->read || !voltdm->write) {
        pr_err("%s: No read/write API for accessing vdd_%s regs\n",
            __func__, voltdm->name);
        return -EINVAL;
    }

    *target_vsel = voltdm->pmic->uv_to_vsel(target_volt);
    *current_vsel = voltdm->pmic->uv_to_vsel(voltdm->nominal_volt);

    /* Setting the ON voltage to the new target voltage */
    vc_cmdval = voltdm->read(vc->cmdval_reg);
    vc_cmdval &= ~vc->common->cmd_on_mask;
    vc_cmdval |= (*target_vsel << vc->common->cmd_on_shift);
    voltdm->write(vc_cmdval, vc->cmdval_reg);

    omap_vp_update_errorgain(voltdm, target_volt);

    return 0;
}
/*
 * omap_vc_post_scale - busy-wait until the PMIC voltage transition
 * completes.  The settle time is the number of vsel steps times the PMIC
 * step size divided by its slew rate, plus a 2 microsecond buffer.
 */
void omap_vc_post_scale(struct voltagedomain *voltdm,
            unsigned long target_volt,
            u8 target_vsel, u8 current_vsel)
{
    u32 vsel_delta = abs(target_vsel - current_vsel);
    u32 settle_us;

    /* SMPS slew rate / step size. 2us added as buffer. */
    settle_us = (vsel_delta * voltdm->pmic->step_size) /
            voltdm->pmic->slew_rate + 2;
    udelay(settle_us);
}
/*
 * vc_bypass_scale - VC bypass method of voltage scaling.
 * Programs the bypass register with data/register/slave-address fields,
 * sets the VALID bit to start the I2C transfer, and polls until the
 * hardware clears VALID (command acknowledged by the SMPS), with a
 * retry/timeout guard.  Returns 0 on success or a negative error code.
 */
int omap_vc_bypass_scale(struct voltagedomain *voltdm,
             unsigned long target_volt)
{
    struct omap_vc_channel *vc = voltdm->vc;
    u32 loop_cnt = 0, retries_cnt = 0;
    u32 vc_valid, vc_bypass_val_reg, vc_bypass_value;
    u8 target_vsel, current_vsel;
    int ret;

    /*
     * Fixed: "&current_vsel" had been mangled into the mojibake
     * "¤t_vsel" (HTML entity corruption), which does not compile.
     */
    ret = omap_vc_pre_scale(voltdm, target_volt, &target_vsel,
                &current_vsel);
    if (ret)
        return ret;

    vc_valid = vc->common->valid;
    vc_bypass_val_reg = vc->common->bypass_val_reg;
    vc_bypass_value = (target_vsel << vc->common->data_shift) |
        (vc->volt_reg_addr << vc->common->regaddr_shift) |
        (vc->i2c_slave_addr << vc->common->slaveaddr_shift);

    voltdm->write(vc_bypass_value, vc_bypass_val_reg);
    voltdm->write(vc_bypass_value | vc_valid, vc_bypass_val_reg);

    vc_bypass_value = voltdm->read(vc_bypass_val_reg);

    /*
     * Loop till the bypass command is acknowledged from the SMPS.
     * NOTE: This is legacy code. The loop count and retry count needs
     * to be revisited.
     */
    while (!(vc_bypass_value & vc_valid)) {
        loop_cnt++;

        if (retries_cnt > 10) {
            pr_warning("%s: Retry count exceeded\n", __func__);
            return -ETIMEDOUT;
        }

        if (loop_cnt > 50) {
            retries_cnt++;
            loop_cnt = 0;
            udelay(10);
        }
        vc_bypass_value = voltdm->read(vc_bypass_val_reg);
    }

    omap_vc_post_scale(voltdm, target_volt, target_vsel, current_vsel);
    return 0;
}
static void __init omap3_vfsm_init(struct voltagedomain *voltdm)
{
    /*
     * Voltage Manager FSM parameters init
     * XXX This data should be passed in from the board file
     */
    voltdm->write(OMAP3_CLKSETUP, OMAP3_PRM_CLKSETUP_OFFSET);
    voltdm->write(OMAP3_VOLTOFFSET, OMAP3_PRM_VOLTOFFSET_OFFSET);
    voltdm->write(OMAP3_VOLTSETUP2, OMAP3_PRM_VOLTSETUP2_OFFSET);
}
/*
 * omap3_vc_init_channel - OMAP3-specific one-time VC setup.
 * The VFSM parameters are chip-wide, so they are programmed only once
 * regardless of how many channels are initialized.
 */
static void __init omap3_vc_init_channel(struct voltagedomain *voltdm)
{
    static bool is_initialized;

    if (is_initialized)
        return;

    omap3_vfsm_init(voltdm);

    is_initialized = true;
}
/* OMAP4 specific voltage init functions */

/*
 * omap4_vc_init_channel - OMAP4-specific one-time VC setup.
 * Programs the VC I2C clock SCL low/high timing; done once for all
 * channels.
 */
static void __init omap4_vc_init_channel(struct voltagedomain *voltdm)
{
    static bool is_initialized;
    u32 vc_val;

    if (is_initialized)
        return;

    /* XXX These are magic numbers and do not belong! */
    vc_val = (0x60 << OMAP4430_SCLL_SHIFT | 0x26 << OMAP4430_SCLH_SHIFT);
    voltdm->write(vc_val, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET);

    is_initialized = true;
}
/**
 * omap_vc_i2c_init - initialize I2C interface to PMIC
 * @voltdm: voltage domain containing VC data
 *
 * Use PMIC supplied settings for I2C high-speed mode and
 * master code (if set) and program the VC I2C configuration
 * register.
 *
 * The VC I2C configuration is common to all VC channels,
 * so this function only configures I2C for the first VC
 * channel registers.  All other VC channels will use the
 * same configuration.
 */
static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
{
    struct omap_vc_channel *vc = voltdm->vc;
    static bool initialized;
    static bool i2c_high_speed;
    u8 mcode;

    /* Subsequent callers only get a consistency warning. */
    if (initialized) {
        if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
            pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
                __func__, voltdm->name, i2c_high_speed);
        return;
    }

    i2c_high_speed = voltdm->pmic->i2c_high_speed;
    if (i2c_high_speed)
        voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
                vc->common->i2c_cfg_hsen_mask,
                vc->common->i2c_cfg_reg);

    mcode = voltdm->pmic->i2c_mcode;
    if (mcode)
        voltdm->rmw(vc->common->i2c_mcode_mask,
                mcode << __ffs(vc->common->i2c_mcode_mask),
                vc->common->i2c_cfg_reg);

    initialized = true;
}
/*
 * omap_vc_init_channel - initialize one VC channel from PMIC/board data.
 * Selects the bit layout (default vs OMAP4-MPU mutant), programs the PMIC
 * I2C slave address, the voltage and command register addresses, the
 * ON/ONLP/RET/OFF command voltages, the channel configuration, the setup
 * times, the shared I2C configuration, and finally the SoC-specific
 * one-time setup.  Requires PMIC info and register accessors on @voltdm.
 */
void __init omap_vc_init_channel(struct voltagedomain *voltdm)
{
    struct omap_vc_channel *vc = voltdm->vc;
    u8 on_vsel, onlp_vsel, ret_vsel, off_vsel;
    u32 val;

    if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
        pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
        return;
    }

    if (!voltdm->read || !voltdm->write) {
        pr_err("%s: No read/write API for accessing vdd_%s regs\n",
            __func__, voltdm->name);
        return;
    }

    vc->cfg_channel = 0;
    if (vc->flags & OMAP_VC_CHANNEL_CFG_MUTANT)
        vc_cfg_bits = &vc_mutant_channel_cfg;
    else
        vc_cfg_bits = &vc_default_channel_cfg;

    /* get PMIC/board specific settings */
    vc->i2c_slave_addr = voltdm->pmic->i2c_slave_addr;
    vc->volt_reg_addr = voltdm->pmic->volt_reg_addr;
    vc->cmd_reg_addr = voltdm->pmic->cmd_reg_addr;
    vc->setup_time = voltdm->pmic->volt_setup_time;

    /* Configure the i2c slave address for this VC */
    voltdm->rmw(vc->smps_sa_mask,
            vc->i2c_slave_addr << __ffs(vc->smps_sa_mask),
            vc->smps_sa_reg);
    vc->cfg_channel |= vc_cfg_bits->sa;

    /*
     * Configure the PMIC register addresses.
     */
    voltdm->rmw(vc->smps_volra_mask,
            vc->volt_reg_addr << __ffs(vc->smps_volra_mask),
            vc->smps_volra_reg);
    vc->cfg_channel |= vc_cfg_bits->rav;

    /* Command register is optional; enable RAC only when present. */
    if (vc->cmd_reg_addr) {
        voltdm->rmw(vc->smps_cmdra_mask,
                vc->cmd_reg_addr << __ffs(vc->smps_cmdra_mask),
                vc->smps_cmdra_reg);
        vc->cfg_channel |= vc_cfg_bits->rac | vc_cfg_bits->racen;
    }

    /* Set up the on, inactive, retention and off voltage */
    on_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->on_volt);
    onlp_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->onlp_volt);
    ret_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->ret_volt);
    off_vsel = voltdm->pmic->uv_to_vsel(voltdm->pmic->off_volt);
    val = ((on_vsel << vc->common->cmd_on_shift) |
           (onlp_vsel << vc->common->cmd_onlp_shift) |
           (ret_vsel << vc->common->cmd_ret_shift) |
           (off_vsel << vc->common->cmd_off_shift));
    voltdm->write(val, vc->cmdval_reg);
    vc->cfg_channel |= vc_cfg_bits->cmd;

    /* Channel configuration */
    omap_vc_config_channel(voltdm);

    /* Configure the setup times */
    voltdm->rmw(voltdm->vfsm->voltsetup_mask,
            vc->setup_time << __ffs(voltdm->vfsm->voltsetup_mask),
            voltdm->vfsm->voltsetup_reg);

    omap_vc_i2c_init(voltdm);

    if (cpu_is_omap34xx())
        omap3_vc_init_channel(voltdm);
    else if (cpu_is_omap44xx())
        omap4_vc_init_channel(voltdm);
}
| gpl-2.0 |
Red680812/android_44_KitKat_kernel_htc_dlxpul | drivers/mfd/tc3589x.c | 5092 | 9531 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License, version 2
* Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tc3589x.h>
#define TC3589x_CLKMODE_MODCTL_SLEEP 0x0
#define TC3589x_CLKMODE_MODCTL_OPERATION (1 << 0)
/**
 * tc3589x_reg_read() - read a single TC3589x register
 * @tc3589x: Device to read from
 * @reg: Register to read
 *
 * Return the register value (>= 0) on success or a negative errno from
 * the SMBus transfer.
 */
int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg)
{
    int ret;

    ret = i2c_smbus_read_byte_data(tc3589x->i2c, reg);
    if (ret < 0)
        dev_err(tc3589x->dev, "failed to read reg %#x: %d\n",
            reg, ret);

    return ret;
}
EXPORT_SYMBOL_GPL(tc3589x_reg_read);
/**
 * tc3589x_reg_write() - write a single TC3589x register
 * @tc3589x: Device to write to
 * @reg: Register to write
 * @data: Value to write
 *
 * Return 0 on success or a negative errno from the SMBus transfer.
 */
int tc3589x_reg_write(struct tc3589x *tc3589x, u8 reg, u8 data)
{
    int ret;

    ret = i2c_smbus_write_byte_data(tc3589x->i2c, reg, data);
    if (ret < 0)
        dev_err(tc3589x->dev, "failed to write reg %#x: %d\n",
            reg, ret);

    return ret;
}
EXPORT_SYMBOL_GPL(tc3589x_reg_write);
/**
 * tc3589x_block_read() - read multiple TC3589x registers
 * @tc3589x: Device to read from
 * @reg: First register
 * @length: Number of registers
 * @values: Buffer to write to
 *
 * Return the number of bytes read (>= 0) on success or a negative errno
 * from the SMBus transfer.
 */
int tc3589x_block_read(struct tc3589x *tc3589x, u8 reg, u8 length, u8 *values)
{
    int ret;

    ret = i2c_smbus_read_i2c_block_data(tc3589x->i2c, reg, length, values);
    if (ret < 0)
        dev_err(tc3589x->dev, "failed to read regs %#x: %d\n",
            reg, ret);

    return ret;
}
EXPORT_SYMBOL_GPL(tc3589x_block_read);
/**
 * tc3589x_block_write() - write multiple TC3589x registers
 * @tc3589x: Device to write to
 * @reg: First register
 * @length: Number of registers
 * @values: Values to write
 *
 * Return: 0 on success, negative errno on failure.
 */
int tc3589x_block_write(struct tc3589x *tc3589x, u8 reg, u8 length,
			const u8 *values)
{
	int ret;

	ret = i2c_smbus_write_i2c_block_data(tc3589x->i2c, reg, length,
					     values);
	if (ret < 0)
		dev_err(tc3589x->dev, "failed to write regs %#x: %d\n",
			reg, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(tc3589x_block_write);
/**
 * tc3589x_set_bits() - set the value of a bitfield in a TC3589x register
 * @tc3589x: Device to write to
 * @reg: Register to write
 * @mask: Mask of bits to set
 * @val: Value to set
 *
 * Read-modify-write under the device mutex so concurrent callers do not
 * clobber each other's bits.
 *
 * Return: 0 on success, negative errno on I2C failure.
 */
int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val)
{
	int ret;

	mutex_lock(&tc3589x->lock);

	ret = tc3589x_reg_read(tc3589x, reg);
	if (ret < 0)
		goto out;

	ret &= ~mask;
	ret |= val;

	ret = tc3589x_reg_write(tc3589x, reg, ret);

out:
	mutex_unlock(&tc3589x->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tc3589x_set_bits);
/* IRQ resource forwarded to the GPIO child (relative to irq_base) */
static struct resource gpio_resources[] = {
	{
		.start	= TC3589x_INT_GPIIRQ,
		.end	= TC3589x_INT_GPIIRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* IRQ resource forwarded to the keypad child */
static struct resource keypad_resources[] = {
	{
		.start	= TC3589x_INT_KBDIRQ,
		.end	= TC3589x_INT_KBDIRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* mfd cell describing the tc3589x-gpio sub-device */
static struct mfd_cell tc3589x_dev_gpio[] = {
	{
		.name		= "tc3589x-gpio",
		.num_resources	= ARRAY_SIZE(gpio_resources),
		.resources	= &gpio_resources[0],
	},
};

/* mfd cell describing the tc3589x-keypad sub-device */
static struct mfd_cell tc3589x_dev_keypad[] = {
	{
		.name		= "tc3589x-keypad",
		.num_resources	= ARRAY_SIZE(keypad_resources),
		.resources	= &keypad_resources[0],
	},
};
/*
 * Threaded interrupt handler: dispatch every bit set in IRQST to its
 * nested virtual interrupt, then re-read IRQST in case a new event
 * arrived while we were handling the previous ones.
 */
static irqreturn_t tc3589x_irq(int irq, void *data)
{
	struct tc3589x *tc3589x = data;
	int status;

again:
	status = tc3589x_reg_read(tc3589x, TC3589x_IRQST);
	if (status < 0)
		return IRQ_NONE;

	while (status) {
		int bit = __ffs(status);

		handle_nested_irq(tc3589x->irq_base + bit);
		status &= ~(1 << bit);
	}

	/*
	 * A dummy read or write (to any register) appears to be necessary to
	 * have the last interrupt clear (for example, GPIO IC write) take
	 * effect. In such a case, recheck for any interrupt which is still
	 * pending.
	 */
	status = tc3589x_reg_read(tc3589x, TC3589x_IRQST);
	if (status)
		goto again;

	return IRQ_HANDLED;
}
/*
 * Register the chip's internal interrupt range as nested-threaded IRQs
 * so child drivers can request them; always succeeds (returns 0).
 */
static int tc3589x_irq_init(struct tc3589x *tc3589x)
{
	int base = tc3589x->irq_base;
	int irq;

	for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) {
		irq_set_chip_data(irq, tc3589x);
		irq_set_chip_and_handler(irq, &dummy_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
		/* On ARM, descriptors must be explicitly marked usable */
		set_irq_flags(irq, IRQF_VALID);
#else
		irq_set_noprobe(irq);
#endif
	}

	return 0;
}
/* Undo tc3589x_irq_init(): clear handlers and chip data for each IRQ */
static void tc3589x_irq_remove(struct tc3589x *tc3589x)
{
	int base = tc3589x->irq_base;
	int irq;

	for (irq = base; irq < base + TC3589x_NR_INTERNAL_IRQS; irq++) {
#ifdef CONFIG_ARM
		set_irq_flags(irq, 0);
#endif
		irq_set_chip_and_handler(irq, NULL, NULL);
		irq_set_chip_data(irq, NULL);
	}
}
/*
 * Identify the chip (manufacturer magic + version), hold the unused
 * sub-blocks in reset and clear the reset interrupt.
 */
static int tc3589x_chip_init(struct tc3589x *tc3589x)
{
	int manf, ver, ret;

	manf = tc3589x_reg_read(tc3589x, TC3589x_MANFCODE);
	if (manf < 0)
		return manf;

	ver = tc3589x_reg_read(tc3589x, TC3589x_VERSION);
	if (ver < 0)
		return ver;

	if (manf != TC3589x_MANFCODE_MAGIC) {
		dev_err(tc3589x->dev, "unknown manufacturer: %#x\n", manf);
		return -EINVAL;
	}

	dev_info(tc3589x->dev, "manufacturer: %#x, version: %#x\n", manf, ver);

	/*
	 * Put everything except the IRQ module into reset;
	 * also spare the GPIO module for any pin initialization
	 * done during pre-kernel boot
	 */
	ret = tc3589x_reg_write(tc3589x, TC3589x_RSTCTRL,
				TC3589x_RSTCTRL_TIMRST
				| TC3589x_RSTCTRL_ROTRST
				| TC3589x_RSTCTRL_KBDRST);
	if (ret < 0)
		return ret;

	/* Clear the reset interrupt. */
	return tc3589x_reg_write(tc3589x, TC3589x_RSTINTCLR, 0x1);
}
/*
 * Register the MFD child devices (GPIO and/or keypad) selected by the
 * platform data 'block' mask.
 *
 * Return: 0 on success, negative errno from mfd_add_devices() otherwise.
 */
static int __devinit tc3589x_device_init(struct tc3589x *tc3589x)
{
	int ret = 0;
	unsigned int blocks = tc3589x->pdata->block;

	if (blocks & TC3589x_BLOCK_GPIO) {
		ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_gpio,
				      ARRAY_SIZE(tc3589x_dev_gpio), NULL,
				      tc3589x->irq_base);
		if (ret) {
			dev_err(tc3589x->dev, "failed to add gpio child\n");
			return ret;
		}
		dev_info(tc3589x->dev, "added gpio block\n");
	}

	if (blocks & TC3589x_BLOCK_KEYPAD) {
		ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_keypad,
				      ARRAY_SIZE(tc3589x_dev_keypad), NULL,
				      tc3589x->irq_base);
		if (ret) {
			/* message fixed: used to read "failed to keypad child" */
			dev_err(tc3589x->dev, "failed to add keypad child\n");
			return ret;
		}
		dev_info(tc3589x->dev, "added keypad block\n");
	}

	return ret;
}
/*
 * I2C probe: validate platform data and adapter capabilities, identify
 * the chip, set up the internal IRQ domain, request the chip IRQ and
 * register the child devices. Error paths unwind in reverse order.
 */
static int __devinit tc3589x_probe(struct i2c_client *i2c,
				   const struct i2c_device_id *id)
{
	struct tc3589x_platform_data *pdata = i2c->dev.platform_data;
	struct tc3589x *tc3589x;
	int ret;

	/*
	 * pdata is dereferenced unconditionally below (irq_base, and
	 * 'block' in tc3589x_device_init()); fail early rather than oops
	 * on boards that forgot to provide it.
	 */
	if (!pdata) {
		dev_err(&i2c->dev, "no platform data\n");
		return -EINVAL;
	}

	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
				     | I2C_FUNC_SMBUS_I2C_BLOCK))
		return -EIO;

	tc3589x = kzalloc(sizeof(struct tc3589x), GFP_KERNEL);
	if (!tc3589x)
		return -ENOMEM;

	mutex_init(&tc3589x->lock);

	tc3589x->dev = &i2c->dev;
	tc3589x->i2c = i2c;
	tc3589x->pdata = pdata;
	tc3589x->irq_base = pdata->irq_base;
	tc3589x->num_gpio = id->driver_data;

	i2c_set_clientdata(i2c, tc3589x);

	ret = tc3589x_chip_init(tc3589x);
	if (ret)
		goto out_free;

	ret = tc3589x_irq_init(tc3589x);
	if (ret)
		goto out_free;

	ret = request_threaded_irq(tc3589x->i2c->irq, NULL, tc3589x_irq,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   "tc3589x", tc3589x);
	if (ret) {
		dev_err(tc3589x->dev, "failed to request IRQ: %d\n", ret);
		goto out_removeirq;
	}

	ret = tc3589x_device_init(tc3589x);
	if (ret) {
		dev_err(tc3589x->dev, "failed to add child devices\n");
		goto out_freeirq;
	}

	return 0;

out_freeirq:
	free_irq(tc3589x->i2c->irq, tc3589x);
out_removeirq:
	tc3589x_irq_remove(tc3589x);
out_free:
	kfree(tc3589x);
	return ret;
}
/* Tear down in reverse probe order: children, chip IRQ, IRQ domain */
static int __devexit tc3589x_remove(struct i2c_client *client)
{
	struct tc3589x *tc3589x = i2c_get_clientdata(client);

	mfd_remove_devices(tc3589x->dev);

	free_irq(tc3589x->i2c->irq, tc3589x);
	tc3589x_irq_remove(tc3589x);

	kfree(tc3589x);

	return 0;
}
#ifdef CONFIG_PM
/* Put the chip into its sleep clock mode unless it is a wakeup source */
static int tc3589x_suspend(struct device *dev)
{
	struct tc3589x *tc3589x = dev_get_drvdata(dev);
	struct i2c_client *client = tc3589x->i2c;
	int ret = 0;

	/* put the system to sleep mode */
	if (!device_may_wakeup(&client->dev))
		ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
					TC3589x_CLKMODE_MODCTL_SLEEP);

	return ret;
}

/* Restore the operational clock mode on resume */
static int tc3589x_resume(struct device *dev)
{
	struct tc3589x *tc3589x = dev_get_drvdata(dev);
	struct i2c_client *client = tc3589x->i2c;
	int ret = 0;

	/* enable the system into operation */
	if (!device_may_wakeup(&client->dev))
		ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE,
					TC3589x_CLKMODE_MODCTL_OPERATION);

	return ret;
}

static const SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend,
			       tc3589x_resume);
#endif
static const struct i2c_device_id tc3589x_id[] = {
	/* driver_data (24) is consumed as num_gpio in probe */
	{ "tc3589x", 24 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tc3589x_id);

static struct i2c_driver tc3589x_driver = {
	.driver.name	= "tc3589x",
	.driver.owner	= THIS_MODULE,
#ifdef CONFIG_PM
	.driver.pm	= &tc3589x_dev_pm_ops,
#endif
	.probe		= tc3589x_probe,
	.remove		= __devexit_p(tc3589x_remove),
	.id_table	= tc3589x_id,
};

static int __init tc3589x_init(void)
{
	return i2c_add_driver(&tc3589x_driver);
}
/* subsys_initcall: register early so GPIO/keypad consumers can probe */
subsys_initcall(tc3589x_init);

static void __exit tc3589x_exit(void)
{
	i2c_del_driver(&tc3589x_driver);
}
module_exit(tc3589x_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TC3589x MFD core driver");
MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
| gpl-2.0 |
cwyy/linux-3.4.69 | drivers/iommu/omap-iovmm.c | 5348 | 16336 | /*
* omap iommu: simple virtual address space management
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/iopgtable.h>
static struct kmem_cache *iovm_area_cachep;
/* Offset of the first scatterlist entry; 0 for a NULL or empty table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (sgt && sgt->nents)
		return sgt->sgl->offset;

	return 0;
}
/*
 * Return the total byte length covered by an sg table, or 0 if the table
 * is NULL, an entry is not an iommu page size, or an internal (non-first)
 * entry carries an offset.
 */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			/* %zu: 'bytes' is size_t (the old %u was a -Wformat mismatch) */
			pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal "
			       "entries\n", __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x) (!!sgtable_len(x))
/*
 * Return the largest supported iommu page size (16M/1M/64K/4K) that
 * @addr is aligned to, or 0 when @addr is not even 4K-aligned.
 */
static unsigned max_alignment(u32 addr)
{
	static const unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	int i;

	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (!(addr & (pagesize[i] - 1)))
			return pagesize[i];
	}

	return 0;
}
/*
 * calculate the optimal number sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		/* %zx: 'bytes' is size_t (%08x mismatches on 64-bit builds) */
		pr_err("%s: wrong size %08zx\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		/* each step uses the biggest page both addresses are aligned to */
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));

		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
/*
 * allocate and initialize sg_table header(a kind of 'superblock')
 *
 * Returns the table, or ERR_PTR(-EINVAL/-ENOMEM) on bad size/OOM.
 */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		/* linear areas may use iommu superpages: fewer, larger entries */
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
/* Release an sg_table "superblock" obtained from sgtable_alloc() */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	/* note: prints only the stale pointer value, not a dereference */
	pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/*
 * map 'sglist' to a contiguous mpu virtual area and return 'va'
 *
 * Grabs a VM_IOREMAP region big enough for the whole table and maps each
 * page-sized entry into it as MT_DEVICE memory.  Returns the MPU virtual
 * address or ERR_PTR on failure.
 */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		/* this path only supports one-page entries */
		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}
/* Counterpart of vmap_sg(): drop the MPU virtual mapping */
static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
/*
 * Find the iovma whose [da_start, da_end) range contains @da, or NULL.
 * Caller must hold obj->mmap_lock.
 */
static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			/* %zx: 'len' is size_t (was printed with %x) */
			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}
/**
 * omap_find_iovm_area - find iovma which includes @da
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 *
 * Return: the matching iovm_struct, or NULL if @da is unmapped.
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	/* serialize against concurrent map/unmap of the mmap list */
	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 *
 * Caller must hold obj->mmap_lock; the new iovma is inserted keeping
 * the list sorted by device address.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	/* %zx: 'bytes' is size_t (was printed with %x) */
	dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
/* Unlink @area from the iovma list and return it to the slab cache */
static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	/* %zx: 'bytes' is size_t (was printed with %x) */
	dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}
/**
 * omap_da_to_va - convert (d) to (v)
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr,
 * or NULL if @da is not part of any mapped area.
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
/*
 * Populate @sgt with the pages backing the vmalloc'ed buffer @_va,
 * one PAGE_SIZE entry per scatterlist element.
 *
 * The old 'va_end' local was computed but never read; dropped as dead code.
 */
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}
/* No-op counterpart of sgtable_fill_vmalloc(), kept for symmetry */
static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}
/*
 * create 'da' <-> 'pa' mapping from 'sgt'
 *
 * On failure every entry mapped so far is unmapped again.
 */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	/*
	 * Initialize err: the bytes_to_iopgsz() failure path used to jump
	 * to err_out before 'err' was ever assigned, returning garbage.
	 */
	int err = -EINVAL;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		/* %zx: 'bytes' is size_t (was printed with %x) */
		pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}
/*
 * release 'da' <-> 'pa' mapping
 *
 * Walks the area's sg table and unmaps entry by entry; BUGs if the
 * bookkeeping does not add up to the area size.
 */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		/* %zx: 'bytes' is size_t (was printed with %x) */
		dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}
/*
 * template function for all unmapping
 *
 * Unmaps the iovma containing @da (which must match @flags), releases the
 * MPU virtual mapping via @fn and frees the iovma.  Returns the sg table
 * so the caller can free it, or NULL if nothing matched.
 */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
/*
 * Allocate an iovma for @bytes at (or near) @da and program the iommu
 * mappings from @sgt.  Returns the chosen device address, or a negative
 * errno (cast to u32, checked by callers via IS_ERR_VALUE).
 */
static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	/* %zx: 'bytes' is size_t (was printed with %x) */
	dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}
/* Thin wrapper kept so vmap/vmalloc entry points share one call site */
static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}
/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: requested iommu device virtual address (may be adjusted unless
 *      IOVMF_DA_FIXED is set in @flags — TODO confirm against callers)
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt element must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	/* hand back the first entry's offset within the mapped page */
	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 *
 * Return: the caller-owned sg table to free, or NULL if @da was unmapped.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	da &= PAGE_MASK;	/* drop the sgtable_offset() added by vmap */
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
		   size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;	/* negative errno propagated through 'da' */
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually continuous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	/* unmap_vm_area vfree()s the buffer; we only free the sg table */
	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);
/* Create the slab cache used for iovm_struct allocations */
static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
mcmenaminadrian/Linux-devel | drivers/gpu/drm/gma500/mid_bios.c | 5348 | 8331 | /**************************************************************************
* Copyright (c) 2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
**************************************************************************/
/* TODO
* - Split functions by vbt type
* - Make them all take drm_device
* - Check ioremap failures
*/
#include <drm/drmP.h>
#include <drm/drm.h>
#include "gma_drm.h"
#include "psb_drv.h"
#include "mid_bios.h"
/*
 * Read the fuse registers via the platform config space (B0:D0:F0) to
 * decide between LVDS and MIPI internal display and derive the core
 * clock frequency from the SKU bits.
 *
 * The duplicated '#define FB_REG09' line has been removed.
 */
static void mid_get_fuse_settings(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	uint32_t fuse_value = 0;
	uint32_t fuse_value_tmp = 0;

#define FB_REG06 0xD0810600
#define FB_MIPI_DISABLE (1 << 11)
#define FB_REG09 0xD0810900
#define FB_SKU_MASK 0x7000
#define FB_SKU_SHIFT 12
#define FB_SKU_100 0
#define FB_SKU_100L 1
#define FB_SKU_83 2
	if (pci_root == NULL) {
		WARN_ON(1);
		return;
	}

	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
	if (IS_MRST(dev))
		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;

	DRM_INFO("internal display is %s\n",
		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");

	/* Prevent runtime suspend at start*/
	if (dev_priv->iLVDS_enable) {
		dev_priv->is_lvds_on = true;
		dev_priv->is_mipi_on = false;
	} else {
		dev_priv->is_mipi_on = true;
		dev_priv->is_lvds_on = false;
	}

	dev_priv->video_device_fuse = fuse_value;

	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;

	dev_priv->fuse_reg_value = fuse_value;

	switch (fuse_value_tmp) {
	case FB_SKU_100:
		dev_priv->core_freq = 200;
		break;
	case FB_SKU_100L:
		dev_priv->core_freq = 100;
		break;
	case FB_SKU_83:
		dev_priv->core_freq = 166;
		break;
	default:
		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
			 fuse_value_tmp);
		dev_priv->core_freq = 0;
	}
	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);

	pci_dev_put(pci_root);
}
/*
 * Get the revison ID, B0:D2:F0;0x08
 */
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
	uint32_t platform_rev_id = 0;
	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));

	if (pci_gfx_root == NULL) {
		WARN_ON(1);
		return;
	}
	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
	/* revision id lives in the low byte of the dword */
	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
	pci_dev_put(pci_gfx_root);
	dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
					dev_priv->platform_rev_id);
}
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
u32 addr;
u16 new_size;
u8 *vbt_virtual;
u8 bpi;
u8 number_desc = 0;
struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
struct gct_r10_timing_info ti;
void *pGCT;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
/* Get the address of the platform config vbt, B0:D2:F0;0xFC */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
/* check for platform config address == 0. */
/* this means fw doesn't support vbt */
if (addr == 0) {
vbt->size = 0;
return;
}
/* get the virtual address of the vbt */
vbt_virtual = ioremap(addr, sizeof(*vbt));
if (vbt_virtual == NULL) {
vbt->size = 0;
return;
}
memcpy(vbt, vbt_virtual, sizeof(*vbt));
iounmap(vbt_virtual); /* Free virtual address space */
/* No matching signature don't process the data */
if (memcmp(vbt->signature, "$GCT", 4)) {
vbt->size = 0;
return;
}
dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
switch (vbt->revision) {
case 0:
vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
vbt->size - sizeof(*vbt) + 4);
pGCT = vbt->oaktrail_gct;
bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
dev_priv->gct_data.bpi = bpi;
dev_priv->gct_data.pt =
((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
memcpy(&dev_priv->gct_data.DTD,
&((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
sizeof(struct oaktrail_timing_info));
dev_priv->gct_data.Panel_Port_Control =
((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
break;
case 1:
vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
vbt->size - sizeof(*vbt) + 4);
pGCT = vbt->oaktrail_gct;
bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
dev_priv->gct_data.bpi = bpi;
dev_priv->gct_data.pt =
((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
memcpy(&dev_priv->gct_data.DTD,
&((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
sizeof(struct oaktrail_timing_info));
dev_priv->gct_data.Panel_Port_Control =
((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
break;
case 0x10:
/*header definition changed from rev 01 (v2) to rev 10h. */
/*so, some values have changed location*/
new_size = vbt->checksum; /*checksum contains lo size byte*/
/*LSB of oaktrail_gct contains hi size byte*/
new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
vbt->checksum = vbt->size; /*size contains the checksum*/
if (new_size > 0xff)
vbt->size = 0xff; /*restrict size to 255*/
else
vbt->size = new_size;
/* number of descriptors defined in the GCT */
number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
GCT_R10_DISPLAY_DESC_SIZE * number_desc);
pGCT = vbt->oaktrail_gct;
pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
/*copy the GCT display timings into a temp structure*/
memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
/*now copy the temp struct into the dev_priv->gct_data*/
dp_ti->pixel_clock = ti.pixel_clock;
dp_ti->hactive_hi = ti.hactive_hi;
dp_ti->hactive_lo = ti.hactive_lo;
dp_ti->hblank_hi = ti.hblank_hi;
dp_ti->hblank_lo = ti.hblank_lo;
dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
dp_ti->vactive_hi = ti.vactive_hi;
dp_ti->vactive_lo = ti.vactive_lo;
dp_ti->vblank_hi = ti.vblank_hi;
dp_ti->vblank_lo = ti.vblank_lo;
dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
/* Move the MIPI_Display_Descriptor data from GCT to dev priv */
dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
*((u8 *)pGCT + 0x0d);
dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
(*((u8 *)pGCT + 0x0e)) << 8;
break;
default:
dev_err(dev->dev, "Unknown revision of GCT!\n");
vbt->size = 0;
}
}
/* One-shot chip setup: fuses, VBT/GCT data, then PCI revision ID */
int mid_chip_setup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	mid_get_fuse_settings(dev);
	mid_get_vbt_data(dev_priv);
	mid_get_pci_revID(dev_priv);
	return 0;	/* helpers report problems via WARN/dev_err only */
}
| gpl-2.0 |
AOKP/kernel_lge_mako | drivers/ide/alim15x3.c | 9188 | 15264 | /*
* Copyright (C) 1998-2000 Michel Aubry, Maintainer
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
* Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
*
* Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
* May be copied or modified under the terms of the GNU General Public License
* Copyright (C) 2002 Alan Cox
* ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
* Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
* Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
*
* (U)DMA capable version of ali 1533/1543(C), 1535(D)
*
**********************************************************************
* 9/7/99 --Parts from the above author are included and need to be
* converted into standard interface, once I finish the thought.
*
* Recent changes
* Don't use LBA48 mode on ALi <= 0xC4
* Don't poke 0x79 with a non ALi northbridge
* Don't flip undefined bits on newer chipsets (fix Fujitsu laptop hang)
* Allow UDMA6 on revisions > 0xC4
*
* Documentation
* Chipset documentation available under NDA only
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <asm/io.h>
#define DRV_NAME "alim15x3"
/*
* ALi devices are not plug in. Otherwise these static values would
* need to go. They ought to go away anyway
*/
static u8 m5229_revision;        /* PCI revision of the M5229 IDE controller */
static u8 chip_is_1543c_e;       /* set when the M1533 reg 0x5e check flags a 1543C-E */
static struct pci_dev *isa_dev;  /* cached M1533 ISA bridge, looked up in init_chipset */
/*
 * Program the per-drive PIO FIFO nibble in config register 0x54/0x55
 * (one register per channel, one nibble per drive) with @on.
 */
static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on)
{
        struct pci_dev *pdev = to_pci_dev(hwif->dev);
        int reg = 0x54 + hwif->channel;
        int nibble = 4 * (drive->dn & 1);
        u8 val;

        pci_read_config_byte(pdev, reg, &val);
        val = (val & ~(0x0F << nibble)) | (on << nibble);
        pci_write_config_byte(pdev, reg, val);
}
/*
 * Write UDMA mode (@ultra) and, when @t is non-NULL, PIO/MWDMA cycle
 * timings into the controller's per-channel config registers.
 * 0x56/0x57 hold the UDMA nibble per drive; 0x58..0x5b / 0x5c..0x5f
 * hold setup, 8-bit active/recovery and per-drive active/recovery.
 */
static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive,
                                struct ide_timing *t, u8 ultra)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        int port = hwif->channel ? 0x5c : 0x58;
        int udmat = 0x56 + hwif->channel;
        u8 unit = drive->dn & 1, udma;
        int shift = 4 * unit;

        /* Set up the UDMA */
        pci_read_config_byte(dev, udmat, &udma);
        udma &= ~(0x0F << shift);
        udma |= ultra << shift;
        pci_write_config_byte(dev, udmat, udma);

        if (t == NULL)
                return;

        /*
         * Clamp to hardware field ranges; the final mask lets the
         * maximum (8 resp. 16) wrap to 0 — presumably the hardware
         * encodes "max" as 0 in these 3/4-bit fields (TODO confirm
         * against the M5229 datasheet).
         */
        t->setup = clamp_val(t->setup, 1, 8) & 7;
        t->act8b = clamp_val(t->act8b, 1, 8) & 7;
        t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
        t->active = clamp_val(t->active, 1, 8) & 7;
        t->recover = clamp_val(t->recover, 1, 16) & 15;

        pci_write_config_byte(dev, port, t->setup);
        pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
        pci_write_config_byte(dev, port + unit + 2,
                              (t->active << 4) | t->recover);
}
/**
* ali_set_pio_mode - set host controller for PIO mode
* @hwif: port
* @drive: drive
*
* Program the controller for the given PIO mode.
*/
static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        ide_drive_t *pair = ide_get_pair_dev(drive);
        int bus_speed = ide_pci_clk ? ide_pci_clk : 33;  /* MHz; default 33 */
        unsigned long T = 1000000 / bus_speed; /* PCI clock based */
        struct ide_timing t;

        ide_timing_compute(drive, drive->pio_mode, &t, T, 1);

        /*
         * Setup and 8-bit timing registers are shared per channel, so
         * merge in the mate drive's PIO (and, if set, DMA) constraints
         * to pick values safe for both.
         */
        if (pair) {
                struct ide_timing p;

                ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
                ide_timing_merge(&p, &t, &t,
                        IDE_TIMING_SETUP | IDE_TIMING_8BIT);
                if (pair->dma_mode) {
                        ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
                        ide_timing_merge(&p, &t, &t,
                                IDE_TIMING_SETUP | IDE_TIMING_8BIT);
                }
        }

        /*
         * PIO mode => ATA FIFO on, ATAPI FIFO off
         */
        ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00);

        /* ultra == 0: PIO programming clears the drive's UDMA nibble */
        ali_program_timings(hwif, drive, &t, 0);
}
/**
* ali_udma_filter - compute UDMA mask
* @drive: IDE device
*
* Return available UDMA modes.
*
* The actual rules for the ALi are:
* No UDMA on revisions <= 0x20
* Disk only for revisions < 0xC2
* Not WDC drives on M1543C-E (?)
*/
/*
 * Restrict the advertised UDMA mask per ALi revision:
 * revisions 0x21..0xC1 only allow UDMA for disks, and never for
 * WDC drives behind an M1543C-E southbridge.
 */
static u8 ali_udma_filter(ide_drive_t *drive)
{
        if (m5229_revision <= 0x20 || m5229_revision >= 0xC2)
                return drive->hwif->ultra_mask;

        if (drive->media != ide_disk)
                return 0;

        if (chip_is_1543c_e &&
            strstr((char *)&drive->id[ATA_ID_PROD], "WDC "))
                return 0;

        return drive->hwif->ultra_mask;
}
/**
* ali_set_dma_mode - set host controller for DMA mode
* @hwif: port
* @drive: drive
*
* Configure the hardware for the desired IDE transfer mode.
*/
static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        /* hardware encodings for UDMA0..UDMA6 */
        static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        ide_drive_t *pair = ide_get_pair_dev(drive);
        int bus_speed = ide_pci_clk ? ide_pci_clk : 33;  /* MHz; default 33 */
        unsigned long T = 1000000 / bus_speed; /* PCI clock based */
        const u8 speed = drive->dma_mode;
        u8 tmpbyte = 0x00;
        struct ide_timing t;

        if (speed < XFER_UDMA_0) {
                /* SW/MW DMA: compute cycle timings, merged with the mate
                 * drive's constraints (shared setup/8-bit registers). */
                ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
                if (pair) {
                        struct ide_timing p;

                        ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
                        ide_timing_merge(&p, &t, &t,
                                IDE_TIMING_SETUP | IDE_TIMING_8BIT);
                        if (pair->dma_mode) {
                                ide_timing_compute(pair, pair->dma_mode,
                                                &p, T, 1);
                                ide_timing_merge(&p, &t, &t,
                                        IDE_TIMING_SETUP | IDE_TIMING_8BIT);
                        }
                }
                ali_program_timings(hwif, drive, &t, 0);
        } else {
                /* UDMA: program only the UDMA nibble */
                ali_program_timings(hwif, drive, NULL,
                                udma_timing[speed - XFER_UDMA_0]);
                if (speed >= XFER_UDMA_3) {
                        /* reg 0x4b bit 0 must be set for UDMA3+ */
                        pci_read_config_byte(dev, 0x4b, &tmpbyte);
                        tmpbyte |= 1;
                        pci_write_config_byte(dev, 0x4b, tmpbyte);
                }
        }
}
/**
* ali_dma_check - DMA check
* @drive: target device
* @cmd: command
*
* Returns 1 if the DMA cannot be performed, zero on success.
*/
/*
 * Veto DMA for commands the chip cannot handle: revisions below 0xC2
 * cannot DMA writes to ATAPI (non-disk) devices.  Returns 1 to force
 * PIO, 0 to allow DMA.
 */
static int ali_dma_check(ide_drive_t *drive, struct ide_cmd *cmd)
{
        int atapi_write = (drive->media != ide_disk) &&
                          (cmd->tf_flags & IDE_TFLAG_WRITE);

        if (m5229_revision < 0xC2 && atapi_write)
                return 1;       /* try PIO instead of DMA */

        return 0;
}
/**
* init_chipset_ali15x3 - Initialise an ALi IDE controller
* @dev: PCI device
*
* This function initializes the ALI IDE controller and where
* appropriate also sets up the 1533 southbridge.
*/
static int init_chipset_ali15x3(struct pci_dev *dev)
{
        unsigned long flags;
        u8 tmpbyte;
        struct pci_dev *north = pci_get_slot(dev->bus, PCI_DEVFN(0,0));

        m5229_revision = dev->revision;

        /*
         * NOTE(review): the reference taken here is dropped at the end of
         * this function, yet the cached global isa_dev is dereferenced
         * later (init_hwif_ali15x3, chip detection).  This relies on the
         * southbridge never being hot-removed — confirm.
         */
        isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);

        local_irq_save(flags);

        if (m5229_revision < 0xC2) {
                /*
                 * revision 0x20 (1543-E, 1543-F)
                 * revision 0xC0, 0xC1 (1543C-C, 1543C-D, 1543C-E)
                 * clear CD-ROM DMA write bit, m5229, 0x4b, bit 7
                 */
                pci_read_config_byte(dev, 0x4b, &tmpbyte);
                /*
                 * clear bit 7
                 */
                pci_write_config_byte(dev, 0x4b, tmpbyte & 0x7F);
                /*
                 * check m1533, 0x5e, bit 1~4 == 1001 => & 00011110 = 00010010
                 */
                if (m5229_revision >= 0x20 && isa_dev) {
                        pci_read_config_byte(isa_dev, 0x5e, &tmpbyte);
                        chip_is_1543c_e = ((tmpbyte & 0x1e) == 0x12) ? 1: 0;
                }
                goto out;
        }

        /*
         * 1543C-B?, 1535, 1535D, 1553
         * Note 1: not all "motherboard" support this detection
         * Note 2: if no udma 66 device, the detection may "error".
         *         but in this case, we will not set the device to
         *         ultra 66, the detection result is not important
         */

        /*
         * enable "Cable Detection", m5229, 0x4b, bit3
         */
        pci_read_config_byte(dev, 0x4b, &tmpbyte);
        pci_write_config_byte(dev, 0x4b, tmpbyte | 0x08);

        /*
         * We should only tune the 1533 enable if we are using an ALi
         * North bridge. We might have no north found on some zany
         * box without a device at 0:0.0. The ALi bridge will be at
         * 0:0.0 so if we didn't find one we know what is cooking.
         */
        if (north && north->vendor != PCI_VENDOR_ID_AL)
                goto out;

        if (m5229_revision < 0xC5 && isa_dev)
        {
                /*
                 * set south-bridge's enable bit, m1533, 0x79
                 */
                pci_read_config_byte(isa_dev, 0x79, &tmpbyte);
                if (m5229_revision == 0xC2) {
                        /*
                         * 1543C-B0 (m1533, 0x79, bit 2)
                         */
                        pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x04);
                } else if (m5229_revision >= 0xC3) {
                        /*
                         * 1553/1535 (m1533, 0x79, bit 1)
                         */
                        pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x02);
                }
        }

out:
        /*
         * CD_ROM DMA on (m5229, 0x53, bit0)
         *      Enable this bit even if we want to use PIO.
         * PIO FIFO off (m5229, 0x53, bit1)
         *      The hardware will use 0x54h and 0x55h to control PIO FIFO.
         *      (Not on later devices it seems)
         *
         * 0x53 changes meaning on later revs - we must no touch
         * bit 1 on them. Need to check if 0x20 is the right break.
         */
        if (m5229_revision >= 0x20) {
                pci_read_config_byte(dev, 0x53, &tmpbyte);

                if (m5229_revision <= 0x20)
                        tmpbyte = (tmpbyte & (~0x02)) | 0x01;
                else if (m5229_revision == 0xc7 || m5229_revision == 0xc8)
                        tmpbyte |= 0x03;
                else
                        tmpbyte |= 0x01;

                pci_write_config_byte(dev, 0x53, tmpbyte);
        }
        pci_dev_put(north);
        pci_dev_put(isa_dev);
        local_irq_restore(flags);
        return 0;
}
/*
* Cable special cases
*/
/* Systems whose cable-detect logic is absent/unreliable despite using
 * 80-wire-capable cabling; matched by DMI board/system strings. */
static const struct dmi_system_id cable_dmi_table[] = {
        {
                .ident = "HP Pavilion N5430",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
                        DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
                },
        },
        {
                .ident = "Toshiba Satellite S1800-814",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
                },
        },
        { }
};
static int ali_cable_override(struct pci_dev *pdev)
{
/* Fujitsu P2000 */
if (pdev->subsystem_vendor == 0x10CF &&
pdev->subsystem_device == 0x10AF)
return 1;
/* Mitac 8317 (Winbook-A) and relatives */
if (pdev->subsystem_vendor == 0x1071 &&
pdev->subsystem_device == 0x8317)
return 1;
/* Systems by DMI */
if (dmi_check_system(cable_dmi_table))
return 1;
return 0;
}
/**
* ali_cable_detect - cable detection
* @hwif: IDE interface
*
* This checks if the controller and the cable are capable
* of UDMA66 transfers. It doesn't check the drives.
*/
/**
 *      ali_cable_detect        -       cable detection
 *      @hwif: IDE interface
 *
 *      This checks if the controller and the cable are capable
 *      of UDMA66 transfers. It doesn't check the drives.
 */
static u8 ali_cable_detect(ide_hwif_t *hwif)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        u8 tmpbyte;

        /* Pre-C2 revisions have no 80-pin detection at all */
        if (m5229_revision < 0xC2)
                return ATA_CBL_PATA40;

        /*
         * m5229 80-pin cable detection (from Host View)
         *
         * 0x4a bit0 is 0 => primary channel has 80-pin
         * 0x4a bit1 is 0 => secondary channel has 80-pin
         *
         * Certain laptops use short but suitable cables
         * and don't implement the detect logic.
         */
        if (ali_cable_override(dev))
                return ATA_CBL_PATA40_SHORT;

        pci_read_config_byte(dev, 0x4a, &tmpbyte);
        if (!(tmpbyte & (1 << hwif->channel)))
                return ATA_CBL_PATA80;

        return ATA_CBL_PATA40;
}
#ifndef CONFIG_SPARC64
/**
* init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
* @hwif: interface to configure
*
* Obtain the IRQ tables for an ALi based IDE solution on the PC
* class platforms. This part of the code isn't applicable to the
* Sparc systems.
*/
static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
{
        u8 ideic, inmir;
        /* maps SIRQ routing-table nibble -> ISA IRQ; -1/0 = unroutable */
        s8 irq_routing_table[] = { -1,  9, 3, 10, 4,  5, 7,  6,
                                      1, 11, 0, 12, 0, 14, 0, 15 };
        int irq = -1;

        if (isa_dev) {
                /*
                 * read IDE interface control
                 */
                pci_read_config_byte(isa_dev, 0x58, &ideic);

                /* bit0, bit1 */
                ideic = ideic & 0x03;

                /* get IRQ for IDE Controller */
                if ((hwif->channel && ideic == 0x03) ||
                    (!hwif->channel && !ideic)) {
                        /*
                         * get SIRQ1 routing table
                         */
                        pci_read_config_byte(isa_dev, 0x44, &inmir);
                        inmir = inmir & 0x0f;
                        irq = irq_routing_table[inmir];
                } else if (hwif->channel && !(ideic & 0x01)) {
                        /*
                         * get SIRQ2 routing table
                         */
                        pci_read_config_byte(isa_dev, 0x75, &inmir);
                        inmir = inmir & 0x0f;
                        irq = irq_routing_table[inmir];
                }
                if(irq >= 0)
                        hwif->irq = irq;
        }
}
#else
#define init_hwif_ali15x3 NULL
#endif /* CONFIG_SPARC64 */
/**
* init_dma_ali15x3 - set up DMA on ALi15x3
* @hwif: IDE interface
* @d: IDE port info
*
* Set up the DMA functionality on the ALi 15x3.
*/
static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
                                      const struct ide_port_info *d)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long base = ide_pci_dma_base(hwif, d);

        if (base == 0)
                return -1;

        hwif->dma_base = base;

        if (ide_pci_check_simplex(hwif, d) < 0)
                return -1;

        if (ide_pci_set_master(dev, d->name) < 0)
                return -1;

        /* primary channel only: clear status bits, keep bits 5-6
         * (DMA-capable flags) in the BM status register */
        if (!hwif->channel)
                outb(inb(base + 2) & 0x60, base + 2);

        printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
                hwif->name, base, base + 7);

        if (ide_allocate_dma_engine(hwif))
                return -1;

        return 0;
}
static const struct ide_port_ops ali_port_ops = {
        .set_pio_mode           = ali_set_pio_mode,
        .set_dma_mode           = ali_set_dma_mode,
        .udma_filter            = ali_udma_filter,
        .cable_detect           = ali_cable_detect,
};

static const struct ide_dma_ops ali_dma_ops = {
        .dma_host_set           = ide_dma_host_set,
        .dma_setup              = ide_dma_setup,
        .dma_start              = ide_dma_start,
        .dma_end                = ide_dma_end,
        .dma_test_irq           = ide_dma_test_irq,
        .dma_lost_irq           = ide_dma_lost_irq,
        .dma_check              = ali_dma_check,
        .dma_timer_expiry       = ide_dma_sff_timer_expiry,
        .dma_sff_read_status    = ide_dma_sff_read_status,
};

/* Template port info; alim15x3_init_one() copies and patches this
 * per revision (dma_ops is switched to ali_dma_ops for revs >= 0x20). */
static const struct ide_port_info ali15x3_chipset __devinitdata = {
        .name           = DRV_NAME,
        .init_chipset   = init_chipset_ali15x3,
        .init_hwif      = init_hwif_ali15x3,
        .init_dma       = init_dma_ali15x3,
        .port_ops       = &ali_port_ops,
        .dma_ops        = &sff_dma_ops,
        .pio_mask       = ATA_PIO5,
        .swdma_mask     = ATA_SWDMA2,
        .mwdma_mask     = ATA_MWDMA2,
};
/**
* alim15x3_init_one - set up an ALi15x3 IDE controller
* @dev: PCI device to set up
*
* Perform the actual set up for an ALi15x3 that has been found by the
* hot plug layer.
*/
static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
        struct ide_port_info d = ali15x3_chipset;
        u8 rev = dev->revision, idx = id->driver_data;

        /* don't use LBA48 DMA on ALi devices before rev 0xC5 */
        if (rev <= 0xC4)
                d.host_flags |= IDE_HFLAG_NO_LBA48_DMA;

        if (rev >= 0x20) {
                if (rev == 0x20)
                        d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;

                /* UDMA capability grows with revision:
                 * <C2: UDMA2, C2/C3: UDMA4, C4: UDMA5, >C4: UDMA6 */
                if (rev < 0xC2)
                        d.udma_mask = ATA_UDMA2;
                else if (rev == 0xC2 || rev == 0xC3)
                        d.udma_mask = ATA_UDMA4;
                else if (rev == 0xC4)
                        d.udma_mask = ATA_UDMA5;
                else
                        d.udma_mask = ATA_UDMA6;

                d.dma_ops = &ali_dma_ops;
        } else {
                /* pre-0x20 revisions: no DMA at all */
                d.host_flags |= IDE_HFLAG_NO_DMA;

                d.mwdma_mask = d.swdma_mask = 0;
        }

        /* idx 0 == M5229 (see alim15x3_pci_tbl driver_data) */
        if (idx == 0)
                d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;

        return ide_pci_init_one(dev, &d, NULL);
}
/* driver_data: 0 = M5229, 1 = M5228 (checked in alim15x3_init_one) */
static const struct pci_device_id alim15x3_pci_tbl[] = {
        { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), 0 },
        { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), 1 },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, alim15x3_pci_tbl);

static struct pci_driver alim15x3_pci_driver = {
        .name           = "ALI15x3_IDE",
        .id_table       = alim15x3_pci_tbl,
        .probe          = alim15x3_init_one,
        .remove         = ide_pci_remove,
        .suspend        = ide_pci_suspend,
        .resume         = ide_pci_resume,
};

/* Module entry point: register the PCI driver with the IDE core. */
static int __init ali15x3_ide_init(void)
{
        return ide_pci_register_driver(&alim15x3_pci_driver);
}

static void __exit ali15x3_ide_exit(void)
{
        pci_unregister_driver(&alim15x3_pci_driver);
}

module_init(ali15x3_ide_init);
module_exit(ali15x3_ide_exit);

MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Lloir/elitekernel_oxp_kk | drivers/video/omap/lcdc.c | 9188 | 19816 | /*
* OMAP1 internal LCD controller
*
* Copyright (C) 2004 Nokia Corporation
* Author: Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#include <mach/lcdc.h>
#include <plat/dma.h>
#include <asm/mach-types.h>
#include "omapfb.h"
#include "lcdc.h"
#define MODULE_NAME                     "lcdc"

#define MAX_PALETTE_SIZE                PAGE_SIZE

/* Hardware load modes (OMAP_LCDC_CONTROL bits 20:21). */
enum lcdc_load_mode {
        OMAP_LCDC_LOAD_PALETTE,
        OMAP_LCDC_LOAD_FRAME,
        OMAP_LCDC_LOAD_PALETTE_AND_FRAME
};

/* Single global driver state (one internal LCD controller per SoC). */
static struct omap_lcd_controller {
        enum omapfb_update_mode update_mode;
        int                     ext_mode;       /* external controller attached */

        unsigned long           frame_offset;   /* frame start within vram */
        int                     screen_width;
        int                     xres;
        int                     yres;

        enum omapfb_color_format color_mode;
        int                     bpp;
        void                    *palette_virt;
        dma_addr_t              palette_phys;
        int                     palette_code;   /* hw palette type code */
        int                     palette_size;   /* bytes */

        unsigned int            irq_mask;       /* IRQs to enable on controller start */
        struct completion       last_frame_complete;
        struct completion       palette_load_complete;
        struct clk              *lcd_ck;
        struct omapfb_device    *fbdev;

        void                    (*dma_callback)(void *data);
        void                    *dma_callback_data;

        int                     fbmem_allocated; /* vram was DMA-allocated by us */
        dma_addr_t              vram_phys;
        void                    *vram_virt;
        unsigned long           vram_size;
} lcdc;
/* Record IRQs to be enabled at the next enable_controller() call;
 * does not touch the hardware itself. */
static void inline enable_irqs(int mask)
{
        lcdc.irq_mask |= mask;
}

static void inline disable_irqs(int mask)
{
        lcdc.irq_mask &= ~mask;
}

/* Program the controller's load mode field (CONTROL bits 20:21). */
static void set_load_mode(enum lcdc_load_mode mode)
{
        u32 l;

        l = omap_readl(OMAP_LCDC_CONTROL);
        l &= ~(3 << 20);
        switch (mode) {
        case OMAP_LCDC_LOAD_PALETTE:
                l |= 1 << 20;
                break;
        case OMAP_LCDC_LOAD_FRAME:
                l |= 2 << 20;
                break;
        case OMAP_LCDC_LOAD_PALETTE_AND_FRAME:
                /* encoded as 0 */
                break;
        default:
                BUG();
        }
        omap_writel(l, OMAP_LCDC_CONTROL);
}
/* Enable the controller with the currently requested IRQ mask
 * (DONE is always enabled so shutdown can be synchronized). */
static void enable_controller(void)
{
        u32 l;

        l = omap_readl(OMAP_LCDC_CONTROL);
        l |= OMAP_LCDC_CTRL_LCD_EN;
        l &= ~OMAP_LCDC_IRQ_MASK;
        l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE;        /* enabled IRQs */
        omap_writel(l, OMAP_LCDC_CONTROL);
}

/* Start disabling the controller without waiting for the last frame. */
static void disable_controller_async(void)
{
        u32 l;
        u32 mask;

        l = omap_readl(OMAP_LCDC_CONTROL);
        mask = OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK;
        /*
         * Preserve the DONE mask, since we still want to get the
         * final DONE irq. It will be disabled in the IRQ handler.
         */
        mask &= ~OMAP_LCDC_IRQ_DONE;
        l &= ~mask;
        omap_writel(l, OMAP_LCDC_CONTROL);
}

/* Disable the controller and wait (up to 500ms) for the final
 * frame-DONE interrupt. */
static void disable_controller(void)
{
        init_completion(&lcdc.last_frame_complete);
        disable_controller_async();
        if (!wait_for_completion_timeout(&lcdc.last_frame_complete,
                                msecs_to_jiffies(500)))
                dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
}

/* Recover from FUF/SYNC_LOST: restart the controller, rate-limiting
 * the log message and giving up after 100 consecutive resets. */
static void reset_controller(u32 status)
{
        static unsigned long reset_count;
        static unsigned long last_jiffies;

        disable_controller_async();
        reset_count++;
        if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) {
                dev_err(lcdc.fbdev->dev,
                          "resetting (status %#010x,reset count %lu)\n",
                          status, reset_count);
                last_jiffies = jiffies;
        }
        if (reset_count < 100) {
                enable_controller();
        } else {
                reset_count = 0;
                dev_err(lcdc.fbdev->dev,
                        "too many reset attempts, giving up.\n");
        }
}
/*
* Configure the LCD DMA according to the current mode specified by parameters
* in lcdc.fbdev and fbdev->var.
*/
/*
 * Configure the LCD DMA according to the current mode specified by parameters
 * in lcdc.fbdev and fbdev->var.
 */
static void setup_lcd_dma(void)
{
        /* maps element size in bytes -> OMAP DMA data type */
        static const int dma_elem_type[] = {
                0,
                OMAP_DMA_DATA_TYPE_S8,
                OMAP_DMA_DATA_TYPE_S16,
                0,
                OMAP_DMA_DATA_TYPE_S32,
        };
        struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par;
        struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
        unsigned long   src;
        int             esize, xelem, yelem;

        src = lcdc.vram_phys + lcdc.frame_offset;

        switch (var->rotate) {
        case 0:
                /* 32-bit elements need 4-byte aligned src and an even
                 * xres; fall back to 16-bit elements otherwise */
                if (plane->info.mirror || (src & 3) ||
                    lcdc.color_mode == OMAPFB_COLOR_YUV420 ||
                    (lcdc.xres & 1))
                        esize = 2;
                else
                        esize = 4;
                xelem = lcdc.xres * lcdc.bpp / 8 / esize;
                yelem = lcdc.yres;
                break;
        case 90:
        case 180:
        case 270:
                /* rotation needs DMA features OMAP15xx lacks */
                if (cpu_is_omap15xx()) {
                        BUG();
                }
                esize = 2;
                xelem = lcdc.yres * lcdc.bpp / 16;
                yelem = lcdc.xres;
                break;
        default:
                BUG();
                return;
        }
#ifdef VERBOSE
        dev_dbg(lcdc.fbdev->dev,
                 "setup_dma: src %#010lx esize %d xelem %d yelem %d\n",
                 src, esize, xelem, yelem);
#endif
        omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]);
        if (!cpu_is_omap15xx()) {
                int bpp = lcdc.bpp;

                /*
                 * YUV support is only for external mode when we have the
                 * YUV window embedded in a 16bpp frame buffer.
                 */
                if (lcdc.color_mode == OMAPFB_COLOR_YUV420)
                        bpp = 16;
                /* Set virtual xres elem size */
                omap_set_lcd_dma_b1_vxres(
                        lcdc.screen_width * bpp / 8 / esize);
                /* Setup transformations */
                omap_set_lcd_dma_b1_rotation(var->rotate);
                omap_set_lcd_dma_b1_mirror(plane->info.mirror);
        }
        omap_setup_lcd_dma();
}
/*
 * LCD controller interrupt handler: recovers from FIFO-underflow and
 * sync-lost errors, and completes the frame-done / palette-loaded
 * waiters used by disable_controller() and load_palette().
 */
static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
{
        u32 status;

        status = omap_readl(OMAP_LCDC_STATUS);

        if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST))
                reset_controller(status);
        else {
                if (status & OMAP_LCDC_STAT_DONE) {
                        u32 l;

                        /*
                         * Disable IRQ_DONE. The status bit will be cleared
                         * only when the controller is reenabled and we don't
                         * want to get more interrupts.
                         */
                        l = omap_readl(OMAP_LCDC_CONTROL);
                        l &= ~OMAP_LCDC_IRQ_DONE;
                        omap_writel(l, OMAP_LCDC_CONTROL);
                        complete(&lcdc.last_frame_complete);
                }
                if (status & OMAP_LCDC_STAT_LOADED_PALETTE) {
                        disable_controller_async();
                        complete(&lcdc.palette_load_complete);
                }
        }

        /*
         * Clear these interrupt status bits.
         * Sync_lost, FUF bits were cleared by disabling the LCD controller
         * LOADED_PALETTE can be cleared this way only in palette only
         * load mode. In other load modes it's cleared by disabling the
         * controller.
         */
        status &= ~(OMAP_LCDC_STAT_VSYNC |
                    OMAP_LCDC_STAT_LOADED_PALETTE |
                    OMAP_LCDC_STAT_ABC |
                    OMAP_LCDC_STAT_LINE_INT);
        omap_writel(status, OMAP_LCDC_STATUS);
        return IRQ_HANDLED;
}
/*
* Change to a new video mode. We defer this to a later time to avoid any
* flicker and not to mess up the current LCD DMA context. For this we disable
* the LCD controller, which will generate a DONE irq after the last frame has
* been transferred. Then it'll be safe to reconfigure both the LCD controller
* as well as the LCD DMA.
*/
/*
 * Change to a new video mode. We defer this to a later time to avoid any
 * flicker and not to mess up the current LCD DMA context. For this we disable
 * the LCD controller, which will generate a DONE irq after the last frame has
 * been transferred. Then it'll be safe to reconfigure both the LCD controller
 * as well as the LCD DMA.
 *
 * Only plane 0 / channel 0 at position (0,0) is supported; returns
 * -EINVAL for anything else or for out-of-panel dimensions.
 */
static int omap_lcdc_setup_plane(int plane, int channel_out,
                                 unsigned long offset, int screen_width,
                                 int pos_x, int pos_y, int width, int height,
                                 int color_mode)
{
        struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
        struct lcd_panel *panel = lcdc.fbdev->panel;
        int rot_x, rot_y;

        /* effective panel dimensions after rotation */
        if (var->rotate == 0) {
                rot_x = panel->x_res;
                rot_y = panel->y_res;
        } else {
                rot_x = panel->y_res;
                rot_y = panel->x_res;
        }
        if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 ||
            width > rot_x || height > rot_y) {
#ifdef VERBOSE
                dev_dbg(lcdc.fbdev->dev,
                        "invalid plane params plane %d pos_x %d pos_y %d "
                        "w %d h %d\n", plane, pos_x, pos_y, width, height);
#endif
                return -EINVAL;
        }

        lcdc.frame_offset = offset;
        lcdc.xres = width;
        lcdc.yres = height;
        lcdc.screen_width = screen_width;
        lcdc.color_mode = color_mode;

        /* derive bpp and hw palette code/size from the color mode */
        switch (color_mode) {
        case OMAPFB_COLOR_CLUT_8BPP:
                lcdc.bpp = 8;
                lcdc.palette_code = 0x3000;
                lcdc.palette_size = 512;
                break;
        case OMAPFB_COLOR_RGB565:
                lcdc.bpp = 16;
                lcdc.palette_code = 0x4000;
                lcdc.palette_size = 32;
                break;
        case OMAPFB_COLOR_RGB444:
                lcdc.bpp = 16;
                lcdc.palette_code = 0x4000;
                lcdc.palette_size = 32;
                break;
        case OMAPFB_COLOR_YUV420:
                if (lcdc.ext_mode) {
                        lcdc.bpp = 12;
                        break;
                }
                /* fallthrough */
        case OMAPFB_COLOR_YUV422:
                if (lcdc.ext_mode) {
                        lcdc.bpp = 16;
                        break;
                }
                /* fallthrough */
        default:
                /* FIXME: other BPPs.
                 * bpp1: code  0,     size 256
                 * bpp2: code  0x1000 size 256
                 * bpp4: code  0x2000 size 256
                 * bpp12: code 0x4000 size 32
                 */
                dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode);
                BUG();
                return -1;
        }

        if (lcdc.ext_mode) {
                setup_lcd_dma();
                return 0;
        }

        /* internal mode: restart the controller around the DMA change */
        if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
                disable_controller();
                omap_stop_lcd_dma();
                setup_lcd_dma();
                enable_controller();
        }

        return 0;
}
/*
 * Plane enable hook: the internal controller only has the GFX plane,
 * which is always active, so this merely validates the plane id.
 */
static int omap_lcdc_enable_plane(int plane, int enable)
{
        dev_dbg(lcdc.fbdev->dev,
                "plane %d enable %d update_mode %d ext_mode %d\n",
                plane, enable, lcdc.update_mode, lcdc.ext_mode);
        return (plane == OMAPFB_PLANE_GFX) ? 0 : -EINVAL;
}
/*
* Configure the LCD DMA for a palette load operation and do the palette
* downloading synchronously. We don't use the frame+palette load mode of
* the controller, since the palette can always be downloaded separately.
*/
/*
 * Configure the LCD DMA for a palette load operation and do the palette
 * downloading synchronously. We don't use the frame+palette load mode of
 * the controller, since the palette can always be downloaded separately.
 */
static void load_palette(void)
{
        u16 *palette;

        palette = (u16 *)lcdc.palette_virt;

        /* first entry carries the palette type code in its high nibble */
        *(u16 *)palette &= 0x0fff;
        *(u16 *)palette |= lcdc.palette_code;

        omap_set_lcd_dma_b1(lcdc.palette_phys,
                lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32);

        omap_set_lcd_dma_single_transfer(1);
        omap_setup_lcd_dma();

        init_completion(&lcdc.palette_load_complete);
        enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
        set_load_mode(OMAP_LCDC_LOAD_PALETTE);
        enable_controller();
        /*
         * Fix: the wait here is on palette_load_complete, not the frame
         * DONE completion — the old message said "FRAME DONE" (copy-paste
         * from disable_controller()) and was misleading.
         */
        if (!wait_for_completion_timeout(&lcdc.palette_load_complete,
                                msecs_to_jiffies(500)))
                dev_err(lcdc.fbdev->dev, "timeout waiting for PALETTE LOADED\n");
        /* The controller gets disabled in the irq handler */
        disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
        omap_stop_lcd_dma();

        /* restore DMA transfer mode (looped for internal controller) */
        omap_set_lcd_dma_single_transfer(lcdc.ext_mode);
}
/* Used only in internal controller mode */
static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue,
u16 transp, int update_hw_pal)
{
u16 *palette;
if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255)
return -EINVAL;
palette = (u16 *)lcdc.palette_virt;
palette[regno] &= ~0x0fff;
palette[regno] |= ((red >> 12) << 8) | ((green >> 12) << 4 ) |
(blue >> 12);
if (update_hw_pal) {
disable_controller();
omap_stop_lcd_dma();
load_palette();
setup_lcd_dma();
set_load_mode(OMAP_LCDC_LOAD_FRAME);
enable_controller();
}
return 0;
}
/*
 * Compute the pixel clock divider for the requested pixel clock @pck
 * (Hz) from the LCD functional clock.  TFT panels need a divider of at
 * least 2, passive panels at least 3; the hardware field caps at 255.
 */
static void calc_ck_div(int is_tft, int pck, int *pck_div)
{
        unsigned long lck = clk_get_rate(lcdc.lcd_ck);
        int min_div = is_tft ? 2 : 3;
        int div;

        pck = max(1, pck);
        div = (lck + pck - 1) / pck;    /* round up */
        div = max(min_div, div);
        if (div > 255) {
                /* FIXME: try to adjust logic clock divider as well */
                div = 255;
                dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n",
                         pck / 1000);
        }
        *pck_div = div;
}
/*
 * Program panel-derived settings into the controller: TFT/passive mode,
 * signal polarities, horizontal/vertical timings and the pixel clock
 * divider.  Also writes the achieved pixel clock back into the panel
 * descriptor.
 */
static void inline setup_regs(void)
{
        u32 l;
        struct lcd_panel *panel = lcdc.fbdev->panel;
        int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
        unsigned long lck;
        int pcd;

        l = omap_readl(OMAP_LCDC_CONTROL);
        l &= ~OMAP_LCDC_CTRL_LCD_TFT;
        l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0;
#ifdef CONFIG_MACH_OMAP_PALMTE
/* FIXME:if (machine_is_omap_palmte()) { */
                /* PalmTE uses alternate TFT setting in 8BPP mode */
                l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0;
/*      } */
#endif
        omap_writel(l, OMAP_LCDC_CONTROL);

        /* signal polarity bits live in TIMING2[25:20] */
        l = omap_readl(OMAP_LCDC_TIMING2);
        l &= ~(((1 << 6) - 1) << 20);
        l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20;
        omap_writel(l, OMAP_LCDC_TIMING2);

        /* horizontal timings (all fields are value-1 encoded) */
        l = panel->x_res - 1;
        l |= (panel->hsw - 1) << 10;
        l |= (panel->hfp - 1) << 16;
        l |= (panel->hbp - 1) << 24;
        omap_writel(l, OMAP_LCDC_TIMING0);

        /* vertical timings (vfp/vbp are not -1 encoded) */
        l = panel->y_res - 1;
        l |= (panel->vsw - 1) << 10;
        l |= panel->vfp << 16;
        l |= panel->vbp << 24;
        omap_writel(l, OMAP_LCDC_TIMING1);

        l = omap_readl(OMAP_LCDC_TIMING2);
        l &= ~0xff;

        lck = clk_get_rate(lcdc.lcd_ck);

        if (!panel->pcd)
                calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd);
        else {
                dev_warn(lcdc.fbdev->dev,
                    "Pixel clock divider value is obsolete.\n"
                    "Try to set pixel_clock to %lu and pcd to 0 "
                    "in drivers/video/omap/lcd_%s.c and submit a patch.\n",
                        lck / panel->pcd / 1000, panel->name);

                pcd = panel->pcd;
        }
        l |= pcd & 0xff;
        l |= panel->acb << 8;
        omap_writel(l, OMAP_LCDC_TIMING2);

        /* update panel info with the exact clock */
        panel->pixel_clock = lck / pcd / 1000;
}
/*
* Configure the LCD controller, download the color palette and start a looped
* DMA transfer of the frame image data. Called only in internal
* controller mode.
*/
/*
 * Configure the LCD controller, download the color palette and start a looped
 * DMA transfer of the frame image data.  Called only in internal
 * controller mode.  Only AUTO_UPDATE and UPDATE_DISABLED are valid.
 */
static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode)
{
        int r = 0;

        if (mode != lcdc.update_mode) {
                switch (mode) {
                case OMAPFB_AUTO_UPDATE:
                        setup_regs();
                        load_palette();

                        /* Setup and start LCD DMA */
                        setup_lcd_dma();

                        set_load_mode(OMAP_LCDC_LOAD_FRAME);
                        enable_irqs(OMAP_LCDC_IRQ_DONE);
                        /* This will start the actual DMA transfer */
                        enable_controller();
                        lcdc.update_mode = mode;
                        break;
                case OMAPFB_UPDATE_DISABLED:
                        disable_controller();
                        omap_stop_lcd_dma();
                        lcdc.update_mode = mode;
                        break;
                default:
                        r = -EINVAL;
                }
        }

        return r;
}

/* Return the currently active update mode. */
static enum omapfb_update_mode omap_lcdc_get_update_mode(void)
{
        return lcdc.update_mode;
}
/* PM code called only in internal controller mode */
static void omap_lcdc_suspend(void)
{
        omap_lcdc_set_update_mode(OMAPFB_UPDATE_DISABLED);
}

static void omap_lcdc_resume(void)
{
        omap_lcdc_set_update_mode(OMAPFB_AUTO_UPDATE);
}

/* No extra capabilities beyond the omapfb defaults. */
static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
{
        return;
}

/*
 * Register a single LCD-DMA completion callback; only one client at a
 * time is supported (-EBUSY if already taken).
 */
int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data)
{
        BUG_ON(callback == NULL);

        if (lcdc.dma_callback)
                return -EBUSY;
        else {
                lcdc.dma_callback = callback;
                lcdc.dma_callback_data = data;
        }
        return 0;
}
EXPORT_SYMBOL(omap_lcdc_set_dma_callback);

void omap_lcdc_free_dma_callback(void)
{
        lcdc.dma_callback = NULL;
}
EXPORT_SYMBOL(omap_lcdc_free_dma_callback);

/* LCD-DMA interrupt trampoline: forwards to the registered client. */
static void lcdc_dma_handler(u16 status, void *data)
{
        if (lcdc.dma_callback)
                lcdc.dma_callback(lcdc.dma_callback_data);
}
/*
 * Map externally provided (non-DMA-allocated) video RAM into kernel
 * virtual space with write-combining attributes.
 */
static int mmap_kern(void)
{
        struct vm_struct        *kvma;
        struct vm_area_struct   vma;
        pgprot_t                pgprot;
        unsigned long           vaddr;

        kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP);
        if (kvma == NULL) {
                dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n");
                return -ENOMEM;
        }
        vma.vm_mm = &init_mm;

        vaddr = (unsigned long)kvma->addr;
        vma.vm_start = vaddr;
        vma.vm_end = vaddr + lcdc.vram_size;

        pgprot = pgprot_writecombine(pgprot_kernel);
        if (io_remap_pfn_range(&vma, vaddr,
                           lcdc.vram_phys >> PAGE_SHIFT,
                           lcdc.vram_size, pgprot) < 0) {
                /* NOTE(review): kvma is not released on this path —
                 * looks like a vm-area leak; confirm and consider
                 * freeing the area before returning. */
                dev_err(lcdc.fbdev->dev, "kernel mmap for FB memory failed\n");
                return -EAGAIN;
        }

        lcdc.vram_virt = (void *)vaddr;

        return 0;
}

static void unmap_kern(void)
{
        vunmap(lcdc.vram_virt);
}

/* Allocate the DMA-coherent palette buffer (zeroed). */
static int alloc_palette_ram(void)
{
        lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
                MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
        if (lcdc.palette_virt == NULL) {
                dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
                return -ENOMEM;
        }
        memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE);

        return 0;
}

static void free_palette_ram(void)
{
        dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
                lcdc.palette_virt, lcdc.palette_phys);
}
/*
 * Allocate DMA-coherent frame buffer memory sized for one full panel
 * frame (or the requested region size if larger) and fill in @region.
 */
static int alloc_fbmem(struct omapfb_mem_region *region)
{
        int bpp;
        int frame_size;
        struct lcd_panel *panel = lcdc.fbdev->panel;

        bpp = panel->bpp;
        if (bpp == 12)
                bpp = 16;       /* 12bpp is stored as 16bpp */
        frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res);
        if (region->size > frame_size)
                frame_size = region->size;
        lcdc.vram_size = frame_size;
        lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
                        lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
        if (lcdc.vram_virt == NULL) {
                dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
                return -ENOMEM;
        }
        region->size = frame_size;
        region->paddr = lcdc.vram_phys;
        region->vaddr = lcdc.vram_virt;
        region->alloc = 1;

        memset(lcdc.vram_virt, 0, lcdc.vram_size);

        return 0;
}

static void free_fbmem(void)
{
        dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
                              lcdc.vram_virt, lcdc.vram_phys);
}

/*
 * Set up video RAM: either allocate it ourselves (paddr == 0) or map
 * the caller-provided physical region.  Only one region is supported.
 */
static int setup_fbmem(struct omapfb_mem_desc *req_md)
{
        int r;

        if (!req_md->region_cnt) {
                dev_err(lcdc.fbdev->dev, "no memory regions defined\n");
                return -EINVAL;
        }

        if (req_md->region_cnt > 1) {
                dev_err(lcdc.fbdev->dev, "only one plane is supported\n");
                req_md->region_cnt = 1;
        }

        if (req_md->region[0].paddr == 0) {
                lcdc.fbmem_allocated = 1;
                if ((r = alloc_fbmem(&req_md->region[0])) < 0)
                        return r;
                return 0;
        }

        lcdc.vram_phys = req_md->region[0].paddr;
        lcdc.vram_size = req_md->region[0].size;

        if ((r = mmap_kern()) < 0)
                return r;

        dev_dbg(lcdc.fbdev->dev, "vram at %08x size %08lx mapped to 0x%p\n",
                 lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt);

        return 0;
}

static void cleanup_fbmem(void)
{
        if (lcdc.fbmem_allocated)
                free_fbmem();
        else
                unmap_kern();
}
/*
 * Controller init: acquire and rate-adjust the LCD clock, install the
 * IRQ handler and LCD DMA, and set up palette and frame-buffer memory.
 * Errors unwind in reverse acquisition order via the fail labels.
 */
static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
                          struct omapfb_mem_desc *req_vram)
{
        int r;
        u32 l;
        int rate;
        struct clk *tc_ck;

        lcdc.irq_mask = 0;

        lcdc.fbdev = fbdev;
        lcdc.ext_mode = ext_mode;

        l = 0;
        omap_writel(l, OMAP_LCDC_CONTROL);

        /* FIXME:
         * According to errata some platforms have a clock rate limitiation
         */
        lcdc.lcd_ck = clk_get(fbdev->dev, "lcd_ck");
        if (IS_ERR(lcdc.lcd_ck)) {
                dev_err(fbdev->dev, "unable to access LCD clock\n");
                r = PTR_ERR(lcdc.lcd_ck);
                goto fail0;
        }

        tc_ck = clk_get(fbdev->dev, "tc_ck");
        if (IS_ERR(tc_ck)) {
                dev_err(fbdev->dev, "unable to access TC clock\n");
                r = PTR_ERR(tc_ck);
                goto fail1;
        }

        rate = clk_get_rate(tc_ck);
        clk_put(tc_ck);

        /* board-specific LCD clock derating */
        if (machine_is_ams_delta())
                rate /= 4;
        if (machine_is_omap_h3())
                rate /= 3;
        r = clk_set_rate(lcdc.lcd_ck, rate);
        if (r) {
                dev_err(fbdev->dev, "failed to adjust LCD rate\n");
                goto fail1;
        }
        clk_enable(lcdc.lcd_ck);

        r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
        if (r) {
                dev_err(fbdev->dev, "unable to get IRQ\n");
                goto fail2;
        }

        r = omap_request_lcd_dma(lcdc_dma_handler, NULL);
        if (r) {
                dev_err(fbdev->dev, "unable to get LCD DMA\n");
                goto fail3;
        }

        omap_set_lcd_dma_single_transfer(ext_mode);
        omap_set_lcd_dma_ext_controller(ext_mode);

        if (!ext_mode)
                if ((r = alloc_palette_ram()) < 0)
                        goto fail4;

        if ((r = setup_fbmem(req_vram)) < 0)
                goto fail5;

        pr_info("omapfb: LCDC initialized\n");

        return 0;
fail5:
        if (!ext_mode)
                free_palette_ram();
fail4:
        omap_free_lcd_dma();
fail3:
        free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
fail2:
        clk_disable(lcdc.lcd_ck);
fail1:
        clk_put(lcdc.lcd_ck);
fail0:
        return r;
}

/* Release all resources acquired by omap_lcdc_init(). */
static void omap_lcdc_cleanup(void)
{
        if (!lcdc.ext_mode)
                free_palette_ram();
        cleanup_fbmem();
        omap_free_lcd_dma();
        free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
        clk_disable(lcdc.lcd_ck);
        clk_put(lcdc.lcd_ck);
}
/*
 * Controller operations for the OMAP1 internal LCDC, exported to the
 * omapfb core.  .update_window is NULL: partial-window updates are
 * not supported by this controller.
 */
const struct lcd_ctrl omap1_int_ctrl = {
	.name			= "internal",
	.init			= omap_lcdc_init,
	.cleanup		= omap_lcdc_cleanup,
	.get_caps		= omap_lcdc_get_caps,
	.set_update_mode	= omap_lcdc_set_update_mode,
	.get_update_mode	= omap_lcdc_get_update_mode,
	.update_window		= NULL,
	.suspend		= omap_lcdc_suspend,
	.resume			= omap_lcdc_resume,
	.setup_plane		= omap_lcdc_setup_plane,
	.enable_plane		= omap_lcdc_enable_plane,
	.setcolreg		= omap_lcdc_setcolreg,
};
| gpl-2.0 |
kbc-developers/android_kernel_samsung_msm8660 | drivers/scsi/arcmsr/arcmsr_attr.c | 11236 | 13042 | /*
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr_attr.c
** BY : Nick Cheng
** Description: attributes exported to sysfs and device host
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
** E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include "arcmsr.h"
struct device_attribute *arcmsr_host_attrs[];
/*
 * sysfs "mu_read" handler: drain up to 1031 bytes of message-unit
 * data from the driver's receive ring (rqbuffer) into @buf.  If the
 * adapter had flagged an overflow, the IOP's own buffer is then
 * copied into the ring and arcmsr_iop_message_read() is called,
 * presumably to tell the IOP its data was consumed.
 *
 * Returns the number of bytes placed in @buf (root only).
 *
 * NOTE(review): the ring indices are read/modified here without any
 * visible locking; assumes serialization elsewhere -- verify against
 * the interrupt path in arcmsr_hba.c.
 */
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
					     struct kobject *kobj,
					     struct bin_attribute *bin,
					     char *buf, loff_t off,
					     size_t count)
{
	struct device *dev = container_of(kobj,struct device,kobj);
	struct Scsi_Host *host = class_to_shost(dev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	uint8_t *pQbuffer,*ptmpQbuffer;
	int32_t allxfer_len = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* do message unit read. */
	ptmpQbuffer = (uint8_t *)buf;
	/* One byte at a time so the wrap at ARCMSR_MAX_QBUFFER is
	 * handled trivially by the modulo below. */
	while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
		&& (allxfer_len < 1031)) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
		memcpy(ptmpQbuffer, pQbuffer, 1);
		acb->rqbuf_firstindex++;
		acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
		ptmpQbuffer++;
		allxfer_len++;
	}
	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		struct QBUFFER __iomem *prbuffer;
		uint8_t __iomem *iop_data;
		int32_t iop_len;

		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
		prbuffer = arcmsr_get_iop_rqbuffer(acb);
		iop_data = prbuffer->data;
		iop_len = readl(&prbuffer->data_len);
		/* Pull the overflowed data out of IOP memory (readb:
		 * it is iomem) into the driver ring for the next read. */
		while (iop_len > 0) {
			acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
			acb->rqbuf_lastindex++;
			acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		arcmsr_iop_message_read(acb);
	}
	return (allxfer_len);
}
/*
 * sysfs "mu_write" handler: queue up to 1032 bytes from @buf into the
 * driver's transmit ring (wqbuffer) and kick the data down to the IOP.
 *
 * Returns @count on success, 0 when the caller must retry (ring busy
 * or not enough free space), -EACCES/-EINVAL on permission/size
 * errors.
 *
 * NOTE(review): like the read side, the ring indices are updated
 * without visible locking -- assumes external serialization.
 */
static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
					      struct kobject *kobj,
					      struct bin_attribute *bin,
					      char *buf, loff_t off,
					      size_t count)
{
	struct device *dev = container_of(kobj,struct device,kobj);
	struct Scsi_Host *host = class_to_shost(dev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
	uint8_t *pQbuffer, *ptmpuserbuffer;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (count > 1032)
		return -EINVAL;
	/* do message unit write. */
	ptmpuserbuffer = (uint8_t *)buf;
	user_len = (int32_t)count;
	wqbuf_lastindex = acb->wqbuf_lastindex;
	wqbuf_firstindex = acb->wqbuf_firstindex;
	if (wqbuf_lastindex != wqbuf_firstindex) {
		/* Earlier data still pending: flush it to the IOP and
		 * make the caller retry. */
		arcmsr_post_ioctldata2iop(acb);
		return 0;	/*need retry*/
	} else {
		/* Free space in the power-of-two ring. */
		my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
			&(ARCMSR_MAX_QBUFFER - 1);
		if (my_empty_len >= user_len) {
			while (user_len > 0) {
				pQbuffer =
				&acb->wqbuffer[acb->wqbuf_lastindex];
				memcpy(pQbuffer, ptmpuserbuffer, 1);
				acb->wqbuf_lastindex++;
				acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				ptmpuserbuffer++;
				user_len--;
			}
			/* Only push immediately if the queue had been
			 * drained/cleared; otherwise the next flush
			 * picks it up. */
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_post_ioctldata2iop(acb);
			}
			return count;
		} else {
			return 0;	/*need retry*/
		}
	}
}
/*
 * sysfs "mu_clear" handler: acknowledge any pending IOP overflow,
 * reset both message-unit rings to empty and zero their storage.
 * Root only; always reports 1 byte consumed.
 */
static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
					      struct kobject *kobj,
					      struct bin_attribute *bin,
					      char *buf, loff_t off,
					      size_t count)
{
	struct Scsi_Host *host =
		class_to_shost(container_of(kobj, struct device, kobj));
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
		arcmsr_iop_message_read(acb);
	}

	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED
			| ACB_F_MESSAGE_RQBUFFER_CLEARED
			| ACB_F_MESSAGE_WQBUFFER_READED;

	acb->rqbuf_firstindex = 0;
	acb->rqbuf_lastindex = 0;
	acb->wqbuf_firstindex = 0;
	acb->wqbuf_lastindex = 0;

	memset(acb->rqbuffer, 0, sizeof(struct QBUFFER));
	memset(acb->wqbuffer, 0, sizeof(struct QBUFFER));
	return 1;
}
/* Binary sysfs attribute "mu_read": root-readable, up to 1032 bytes
 * per read (matches the 1031-byte drain limit plus slack). */
static struct bin_attribute arcmsr_sysfs_message_read_attr = {
	.attr = {
		.name = "mu_read",
		.mode = S_IRUSR ,
	},
	.size = 1032,
	.read = arcmsr_sysfs_iop_message_read,
};

/* Binary sysfs attribute "mu_write": root-writable, up to 1032 bytes. */
static struct bin_attribute arcmsr_sysfs_message_write_attr = {
	.attr = {
		.name = "mu_write",
		.mode = S_IWUSR,
	},
	.size = 1032,
	.write = arcmsr_sysfs_iop_message_write,
};

/* Binary sysfs attribute "mu_clear": any 1-byte write resets the
 * message-unit rings. */
static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
	.attr = {
		.name = "mu_clear",
		.mode = S_IWUSR,
	},
	.size = 1,
	.write = arcmsr_sysfs_iop_message_clear,
};
/*
 * Create the three message-unit binary files under the Scsi_Host
 * sysfs directory.  Returns 0 on success; on failure removes the
 * files created so far (goto unwind) and returns the error.
 */
int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
{
	struct Scsi_Host *host = acb->host;
	int error;

	error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
	if (error) {
		printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n");
		goto error_bin_file_message_read;
	}
	error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
	if (error) {
		printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n");
		goto error_bin_file_message_write;
	}
	error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr);
	if (error) {
		printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n");
		goto error_bin_file_message_clear;
	}
	return 0;
error_bin_file_message_clear:
	sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
error_bin_file_message_write:
	sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
error_bin_file_message_read:
	return error;
}
/* Remove the three message-unit sysfs files created by
 * arcmsr_alloc_sysfs_attr(). */
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb)
{
	struct Scsi_Host *host = acb->host;

	sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr);
	sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
	sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
}
/* sysfs show: the arcmsr driver version string. */
static ssize_t
arcmsr_attr_host_driver_version(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", ARCMSR_DRIVER_VERSION);
}
static ssize_t
arcmsr_attr_host_driver_posted_cmd(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
atomic_read(&acb->ccboutstandingcount));
}
static ssize_t
arcmsr_attr_host_driver_reset(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->num_resets);
}
static ssize_t
arcmsr_attr_host_driver_abort(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->num_aborts);
}
static ssize_t
arcmsr_attr_host_fw_model(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%s\n",
acb->firm_model);
}
static ssize_t
arcmsr_attr_host_fw_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%s\n",
acb->firm_version);
}
static ssize_t
arcmsr_attr_host_fw_request_len(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_request_len);
}
static ssize_t
arcmsr_attr_host_fw_numbers_queue(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_numbers_queue);
}
static ssize_t
arcmsr_attr_host_fw_sdram_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_sdram_size);
}
static ssize_t
arcmsr_attr_host_fw_hd_channels(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_hd_channels);
}
/* Read-only device attributes; the show functions above back them. */
static DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL);
static DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL);
static DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL);
static DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL);
static DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL);
static DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL);
static DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL);
static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL);
static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);

/* NULL-terminated attribute list hooked up via the scsi_host_template. */
struct device_attribute *arcmsr_host_attrs[] = {
	&dev_attr_host_driver_version,
	&dev_attr_host_driver_posted_cmd,
	&dev_attr_host_driver_reset,
	&dev_attr_host_driver_abort,
	&dev_attr_host_fw_model,
	&dev_attr_host_fw_version,
	&dev_attr_host_fw_request_len,
	&dev_attr_host_fw_numbers_queue,
	&dev_attr_host_fw_sdram_size,
	&dev_attr_host_fw_hd_channels,
	NULL,
};
| gpl-2.0 |
fenten/Kernel-XT701 | arch/mips/fw/cfe/cfe_api.c | 11236 | 11211 | /*
* Copyright (C) 2000, 2001, 2002 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
*
* Broadcom Common Firmware Environment (CFE)
*
* This module contains device function stubs (small routines to
* call the standard "iocb" interface entry point to CFE).
* There should be one routine here per iocb function call.
*
* Authors: Mitch Lichtenberg, Chris Demetriou
*/
#include <asm/fw/cfe/cfe_api.h>
#include "cfe_api_int.h"
/* Cast from a native pointer to a cfe_xptr_t and back. */
#define XPTR_FROM_NATIVE(n) ((cfe_xptr_t) (intptr_t) (n))
#define NATIVE_FROM_XPTR(x) ((void *) (intptr_t) (x))
int cfe_iocb_dispatch(struct cfe_xiocb *xiocb);
/*
* Declare the dispatch function with args of "intptr_t".
* This makes sure whatever model we're compiling in
* puts the pointers in a single register. For example,
* combining -mlong64 and -mips1 or -mips2 would lead to
* trouble, since the handle and IOCB pointer will be
* passed in two registers each, and CFE expects one.
*/
static int (*cfe_dispfunc) (intptr_t handle, intptr_t xiocb);
static u64 cfe_handle;
/*
 * Record the firmware handle and dispatch entry point handed over by
 * the bootloader so subsequent IOCB calls can reach CFE.
 * Always returns 0.
 */
int cfe_init(u64 handle, u64 ept)
{
	cfe_handle = handle;
	cfe_dispfunc = NATIVE_FROM_XPTR(ept);
	return 0;
}
int cfe_iocb_dispatch(struct cfe_xiocb * xiocb)
{
if (!cfe_dispfunc)
return -1;
return (*cfe_dispfunc) ((intptr_t) cfe_handle, (intptr_t) xiocb);
}
/* Close a previously opened CFE device handle; returns CFE status. */
int cfe_close(int handle)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_CLOSE;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = handle;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = 0;

	cfe_iocb_dispatch(&xiocb);

	return xiocb.xiocb_status;
}
/*
 * Ask the firmware to start secondary CPU @cpu at @fn with the given
 * stack pointer, global pointer and first argument.  Returns CFE
 * status.
 */
int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl);
	xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
	xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_START;
	xiocb.plist.xiocb_cpuctl.gp_val = gp;
	xiocb.plist.xiocb_cpuctl.sp_val = sp;
	xiocb.plist.xiocb_cpuctl.a1_val = a1;
	xiocb.plist.xiocb_cpuctl.start_addr = (long) fn;

	cfe_iocb_dispatch(&xiocb);

	return xiocb.xiocb_status;
}
/* Ask the firmware to stop secondary CPU @cpu; returns CFE status. */
int cfe_cpu_stop(int cpu)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl);
	xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
	xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_STOP;

	cfe_iocb_dispatch(&xiocb);

	return xiocb.xiocb_status;
}
int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_ENV_SET;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
xiocb.plist.xiocb_envbuf.enum_idx = idx;
xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
xiocb.plist.xiocb_envbuf.name_length = namelen;
xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val);
xiocb.plist.xiocb_envbuf.val_length = vallen;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
/*
 * Enumerate the firmware memory map: fill in the @idx-th region's
 * start, length and type.  Returns 0 on success or the negative CFE
 * status.
 */
int
cfe_enummem(int idx, int flags, u64 *start, u64 *length, u64 *type)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_FW_MEMENUM;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = flags;
	xiocb.xiocb_psize = sizeof(struct xiocb_meminfo);
	xiocb.plist.xiocb_meminfo.mi_idx = idx;

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;

	*start = xiocb.plist.xiocb_meminfo.mi_addr;
	*length = xiocb.plist.xiocb_meminfo.mi_size;
	*type = xiocb.plist.xiocb_meminfo.mi_type;

	return 0;
}
/* Return control to the firmware, optionally warm-starting it.
 * Only returns (with CFE status) if the restart fails. */
int cfe_exit(int warm, int status)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_FW_RESTART;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = warm ? CFE_FLG_WARMSTART : 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_exitstat);
	xiocb.plist.xiocb_exitstat.status = status;

	cfe_iocb_dispatch(&xiocb);

	return xiocb.xiocb_status;
}
/* Ask the firmware to flush its caches per @flg; returns CFE status. */
int cfe_flushcache(int flg)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_FW_FLUSHCACHE;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = flg;
	xiocb.xiocb_psize = 0;

	cfe_iocb_dispatch(&xiocb);

	return xiocb.xiocb_status;
}
/*
 * Query the firmware for information about device @name.
 * Returns the device information word (via buf_ioctlcmd) or the
 * negative CFE status on failure.
 */
int cfe_getdevinfo(char *name)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_GETINFO;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
	xiocb.plist.xiocb_buffer.buf_offset = 0;
	xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name);
	xiocb.plist.xiocb_buffer.buf_length = strlen(name);

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;
	return xiocb.plist.xiocb_buffer.buf_ioctlcmd;
}
/*
 * Look up environment variable @name, copying its value into @dest
 * (at most @destlen bytes).  @dest is pre-cleared so a failed lookup
 * leaves an empty string.  Returns CFE status.
 */
int cfe_getenv(char *name, char *dest, int destlen)
{
	struct cfe_xiocb xiocb;

	*dest = 0;

	xiocb.xiocb_fcode = CFE_CMD_ENV_GET;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
	xiocb.plist.xiocb_envbuf.enum_idx = 0;
	xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
	xiocb.plist.xiocb_envbuf.name_length = strlen(name);
	xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(dest);
	xiocb.plist.xiocb_envbuf.val_length = destlen;

	cfe_iocb_dispatch(&xiocb);

	return xiocb.xiocb_status;
}
/*
 * Fetch firmware information (version, memory size, flags, board id,
 * boot area) into @info.  Returns 0 on success or the negative CFE
 * status.
 */
int cfe_getfwinfo(cfe_fwinfo_t * info)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_FW_GETINFO;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_fwinfo);

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;

	info->fwi_version = xiocb.plist.xiocb_fwinfo.fwi_version;
	info->fwi_totalmem = xiocb.plist.xiocb_fwinfo.fwi_totalmem;
	info->fwi_flags = xiocb.plist.xiocb_fwinfo.fwi_flags;
	info->fwi_boardid = xiocb.plist.xiocb_fwinfo.fwi_boardid;
	info->fwi_bootarea_va = xiocb.plist.xiocb_fwinfo.fwi_bootarea_va;
	info->fwi_bootarea_pa = xiocb.plist.xiocb_fwinfo.fwi_bootarea_pa;
	info->fwi_bootarea_size =
	    xiocb.plist.xiocb_fwinfo.fwi_bootarea_size;
#if 0
	/* Reserved fields intentionally not copied. */
	info->fwi_reserved1 = xiocb.plist.xiocb_fwinfo.fwi_reserved1;
	info->fwi_reserved2 = xiocb.plist.xiocb_fwinfo.fwi_reserved2;
	info->fwi_reserved3 = xiocb.plist.xiocb_fwinfo.fwi_reserved3;
#endif

	return 0;
}
/* Get a standard device handle (console etc., selected by @flg).
 * Returns the handle or the negative CFE status. */
int cfe_getstdhandle(int flg)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_GETHANDLE;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = flg;
	xiocb.xiocb_psize = 0;

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;
	return xiocb.xiocb_handle;
}
/* Read the firmware tick counter; returns the tick value. */
int64_t
cfe_getticks(void)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_FW_GETTIME;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_time);
	xiocb.plist.xiocb_time.ticks = 0;

	cfe_iocb_dispatch(&xiocb);

	return xiocb.plist.xiocb_time.ticks;
}
/* Poll input status of device @handle (e.g. console has a pending
 * character).  Returns the status word or negative CFE status. */
int cfe_inpstat(int handle)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_INPSTAT;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = handle;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_inpstat);
	xiocb.plist.xiocb_inpstat.inp_status = 0;

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;
	return xiocb.plist.xiocb_inpstat.inp_status;
}
/*
 * Issue device ioctl @ioctlnum on @handle with @buffer/@length as the
 * argument block.  The number of bytes returned by the firmware is
 * stored through @retlen when non-NULL.  Returns CFE status.
 */
int
cfe_ioctl(int handle, unsigned int ioctlnum, unsigned char *buffer,
	  int length, int *retlen, u64 offset)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_IOCTL;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = handle;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
	xiocb.plist.xiocb_buffer.buf_offset = offset;
	xiocb.plist.xiocb_buffer.buf_ioctlcmd = ioctlnum;
	xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
	xiocb.plist.xiocb_buffer.buf_length = length;

	cfe_iocb_dispatch(&xiocb);

	if (retlen)
		*retlen = xiocb.plist.xiocb_buffer.buf_retlen;
	return xiocb.xiocb_status;
}
/* Open firmware device @name; returns a handle or negative CFE
 * status. */
int cfe_open(char *name)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_OPEN;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
	xiocb.plist.xiocb_buffer.buf_offset = 0;
	xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name);
	xiocb.plist.xiocb_buffer.buf_length = strlen(name);

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;
	return xiocb.xiocb_handle;
}
/* Read from @handle at offset 0; see cfe_readblk(). */
int cfe_read(int handle, unsigned char *buffer, int length)
{
	return cfe_readblk(handle, 0, buffer, length);
}
/*
 * Read up to @length bytes from device @handle at @offset into
 * @buffer.  Returns the number of bytes transferred or the negative
 * CFE status.
 */
int cfe_readblk(int handle, s64 offset, unsigned char *buffer, int length)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_READ;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = handle;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
	xiocb.plist.xiocb_buffer.buf_offset = offset;
	xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
	xiocb.plist.xiocb_buffer.buf_length = length;

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;
	return xiocb.plist.xiocb_buffer.buf_retlen;
}
/* Set environment variable @name to @val; returns CFE status. */
int cfe_setenv(char *name, char *val)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_ENV_SET;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = 0;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
	xiocb.plist.xiocb_envbuf.enum_idx = 0;
	xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
	xiocb.plist.xiocb_envbuf.name_length = strlen(name);
	xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val);
	xiocb.plist.xiocb_envbuf.val_length = strlen(val);

	cfe_iocb_dispatch(&xiocb);

	return xiocb.xiocb_status;
}
/* Write to @handle at offset 0; see cfe_writeblk(). */
int cfe_write(int handle, unsigned char *buffer, int length)
{
	return cfe_writeblk(handle, 0, buffer, length);
}
/*
 * Write @length bytes from @buffer to device @handle at @offset.
 * Returns the number of bytes transferred or the negative CFE status.
 */
int cfe_writeblk(int handle, s64 offset, unsigned char *buffer, int length)
{
	struct cfe_xiocb xiocb;

	xiocb.xiocb_fcode = CFE_CMD_DEV_WRITE;
	xiocb.xiocb_status = 0;
	xiocb.xiocb_handle = handle;
	xiocb.xiocb_flags = 0;
	xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
	xiocb.plist.xiocb_buffer.buf_offset = offset;
	xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
	xiocb.plist.xiocb_buffer.buf_length = length;

	cfe_iocb_dispatch(&xiocb);

	if (xiocb.xiocb_status < 0)
		return xiocb.xiocb_status;
	return xiocb.plist.xiocb_buffer.buf_retlen;
}
| gpl-2.0 |
makerbot/linux-Birdwing | drivers/net/irda/sir_dongle.c | 12516 | 3508 | /*********************************************************************
*
* sir_dongle.c: manager for serial dongle protocol drivers
*
* Copyright (c) 2002 Martin Diehl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
#include <net/irda/irda.h>
#include "sir-dev.h"
/**************************************************************************
*
* dongle registration and attachment
*
*/
static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
static DEFINE_MUTEX(dongle_list_lock); /* protects the list */
/*
 * Register a dongle protocol driver on the global list.
 *
 * Returns 0 on success or -EEXIST if a driver with the same dongle
 * type is already registered.  The list is protected by
 * dongle_list_lock.
 *
 * Idiom cleanup: use list_for_each_entry() instead of the open-coded
 * list_for_each() + list_entry() pair; behavior is unchanged.
 */
int irda_register_dongle(struct dongle_driver *new)
{
	struct dongle_driver *drv;

	IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
		   __func__, new->driver_name, new->type);

	mutex_lock(&dongle_list_lock);
	list_for_each_entry(drv, &dongle_list, dongle_list) {
		if (new->type == drv->type) {
			mutex_unlock(&dongle_list_lock);
			return -EEXIST;
		}
	}
	list_add(&new->dongle_list, &dongle_list);
	mutex_unlock(&dongle_list_lock);
	return 0;
}
EXPORT_SYMBOL(irda_register_dongle);
/* Remove a dongle protocol driver from the global list.
 * Always returns 0. */
int irda_unregister_dongle(struct dongle_driver *drv)
{
	mutex_lock(&dongle_list_lock);
	list_del(&drv->dongle_list);
	mutex_unlock(&dongle_list_lock);
	return 0;
}
EXPORT_SYMBOL(irda_unregister_dongle);
/*
 * Attach the dongle driver for @type to @dev: auto-load the module if
 * needed, find it on the registration list, pin its module refcount
 * and call its open() hook.
 *
 * Returns 0 on success, -EBUSY if a dongle is already attached,
 * -ENODEV if no driver for @type exists, -ESTALE if the module is
 * being removed, or the driver's open() error.
 */
int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	struct list_head *entry;
	const struct dongle_driver *drv = NULL;
	int err = -EINVAL;

	/* Try to auto-load the driver module before searching. */
	request_module("irda-dongle-%d", type);

	if (dev->dongle_drv != NULL)
		return -EBUSY;

	/* serialize access to the list of registered dongles */
	mutex_lock(&dongle_list_lock);
	list_for_each(entry, &dongle_list) {
		drv = list_entry(entry, struct dongle_driver, dongle_list);
		if (drv->type == type)
			break;
		else
			drv = NULL;	/* keep NULL if loop runs off the end */
	}
	if (!drv) {
		err = -ENODEV;
		goto out_unlock;	/* no such dongle */
	}

	/* handling of SMP races with dongle module removal - three cases:
	 * 1) dongle driver was already unregistered - then we haven't found the
	 *    requested dongle above and are already out here
	 * 2) the module is already marked deleted but the driver is still
	 *    registered - then the try_module_get() below will fail
	 * 3) the try_module_get() below succeeds before the module is marked
	 *    deleted - then sys_delete_module() fails and prevents the removal
	 *    because the module is in use.
	 */
	if (!try_module_get(drv->owner)) {
		err = -ESTALE;
		goto out_unlock;	/* rmmod already pending */
	}
	dev->dongle_drv = drv;

	if (!drv->open || (err=drv->open(dev))!=0)
		goto out_reject;	/* failed to open driver */

	mutex_unlock(&dongle_list_lock);
	return 0;

out_reject:
	dev->dongle_drv = NULL;
	module_put(drv->owner);
out_unlock:
	mutex_unlock(&dongle_list_lock);
	return err;
}
/*
 * Detach the dongle driver from @dev: close the instance, unlink the
 * driver and drop the module reference taken by sirdev_get_dongle().
 * Always returns 0; a no-op if no dongle is attached.
 */
int sirdev_put_dongle(struct sir_dev *dev)
{
	const struct dongle_driver *drv = dev->dongle_drv;

	if (drv) {
		if (drv->close)
			drv->close(dev);	/* close this dongle instance */
		dev->dongle_drv = NULL;		/* unlink the dongle driver */
		module_put(drv->owner);		/* decrement driver's module refcount */
	}
	return 0;
}
| gpl-2.0 |
HomuHomu/Kernel-SM-G935D-MM | fs/afs/cache.c | 12772 | 11040 | /* AFS caching stuff
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/sched.h>
#include "internal.h"
static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t buflen);
static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t buflen);
static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data,
const void *buffer,
uint16_t buflen);
static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t buflen);
static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t buflen);
static enum fscache_checkaux afs_vlocation_cache_check_aux(
void *cookie_netfs_data, const void *buffer, uint16_t buflen);
static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t buflen);
static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t buflen);
static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
uint64_t *size);
static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t buflen);
static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
const void *buffer,
uint16_t buflen);
static void afs_vnode_cache_now_uncached(void *cookie_netfs_data);
/* Top-level fscache netfs registration for AFS. */
struct fscache_netfs afs_cache_netfs = {
	.name			= "afs",
	.version		= 0,
};

/* Index cookie: one entry per AFS cell, keyed by cell name. */
struct fscache_cookie_def afs_cell_cache_index_def = {
	.name		= "AFS.cell",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= afs_cell_cache_get_key,
	.get_aux	= afs_cell_cache_get_aux,
	.check_aux	= afs_cell_cache_check_aux,
};

/* Index cookie: one entry per volume location record, keyed by
 * volume name. */
struct fscache_cookie_def afs_vlocation_cache_index_def = {
	.name			= "AFS.vldb",
	.type			= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key		= afs_vlocation_cache_get_key,
	.get_aux		= afs_vlocation_cache_get_aux,
	.check_aux		= afs_vlocation_cache_check_aux,
};

/* Index cookie: one entry per volume; key only, no aux data. */
struct fscache_cookie_def afs_volume_cache_index_def = {
	.name		= "AFS.volume",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= afs_volume_cache_get_key,
};

/* Data cookie: one per vnode (file), with attribute and aux
 * validation hooks. */
struct fscache_cookie_def afs_vnode_cache_index_def = {
	.name			= "AFS.vnode",
	.type			= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key		= afs_vnode_cache_get_key,
	.get_attr		= afs_vnode_cache_get_attr,
	.get_aux		= afs_vnode_cache_get_aux,
	.check_aux		= afs_vnode_cache_check_aux,
	.now_uncached		= afs_vnode_cache_now_uncached,
};
/*
 * set the key for the index entry
 *
 * Copies the cell name into @buffer as the fscache index key.
 * Returns the key length, or 0 if the name does not fit in @bufmax.
 */
static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
				       void *buffer, uint16_t bufmax)
{
	const struct afs_cell *cell = cookie_netfs_data;
	uint16_t klen;

	_enter("%p,%p,%u", cell, buffer, bufmax);

	klen = strlen(cell->name);
	if (klen > bufmax)
		return 0;

	memcpy(buffer, cell->name, klen);
	return klen;
}
/*
 * provide new auxiliary cache data
 *
 * Packs as many of the cell's VL server addresses as fit in @bufmax,
 * truncated to a whole number of address records.  Returns the number
 * of bytes written.
 */
static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
				       void *buffer, uint16_t bufmax)
{
	const struct afs_cell *cell = cookie_netfs_data;
	uint16_t dlen;

	_enter("%p,%p,%u", cell, buffer, bufmax);

	dlen = cell->vl_naddrs * sizeof(cell->vl_addrs[0]);
	dlen = min(dlen, bufmax);
	dlen &= ~(sizeof(cell->vl_addrs[0]) - 1);	/* whole records only */

	memcpy(buffer, cell->vl_addrs, dlen);
	return dlen;
}
/*
 * check that the auxiliary data indicates that the entry is still valid
 *
 * Cell entries are never invalidated by aux data: always reports OKAY.
 */
static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data,
						      const void *buffer,
						      uint16_t buflen)
{
	_leave(" = OKAY");
	return FSCACHE_CHECKAUX_OKAY;
}
/*****************************************************************************/
/*
 * set the key for the index entry
 *
 * Uses the (not necessarily NUL-terminated) VLDB volume name as the
 * key.  Returns the key length, or 0 if it does not fit in @bufmax.
 */
static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
					    void *buffer, uint16_t bufmax)
{
	const struct afs_vlocation *vlocation = cookie_netfs_data;
	uint16_t klen;

	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);

	klen = strnlen(vlocation->vldb.name, sizeof(vlocation->vldb.name));
	if (klen > bufmax)
		return 0;

	memcpy(buffer, vlocation->vldb.name, klen);

	_leave(" = %u", klen);
	return klen;
}
/*
 * provide new auxiliary cache data
 *
 * Stores the tail of the VLDB record (everything from 'nservers' on)
 * as aux data.  Returns the number of bytes written, or 0 if it does
 * not fit in @bufmax.
 */
static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
					    void *buffer, uint16_t bufmax)
{
	const struct afs_vlocation *vlocation = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);

	dlen = sizeof(struct afs_cache_vlocation);
	dlen -= offsetof(struct afs_cache_vlocation, nservers);
	if (dlen > bufmax)
		return 0;

	memcpy(buffer, (uint8_t *)&vlocation->vldb.nservers, dlen);

	_leave(" = %u", dlen);
	return dlen;
}
/*
 * check that the auxiliary data indicates that the entry is still valid
 *
 * The aux blob is the tail of struct afs_cache_vlocation starting at
 * ->nservers (see afs_vlocation_cache_get_aux).  Outcomes:
 *  - length mismatch			-> OBSOLETE
 *  - in-memory copy invalid or same
 *    rtime as the cached copy		-> adopt the cached record, OKAY
 *  - records differ, same volume IDs	-> NEEDS_UPDATE
 *  - records differ, different IDs	-> OBSOLETE
 */
static
enum fscache_checkaux afs_vlocation_cache_check_aux(void *cookie_netfs_data,
						    const void *buffer,
						    uint16_t buflen)
{
	const struct afs_cache_vlocation *cvldb;
	struct afs_vlocation *vlocation = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%s},%p,%u", vlocation->vldb.name, buffer, buflen);

	/* check the size of the data is what we're expecting */
	dlen = sizeof(struct afs_cache_vlocation);
	dlen -= offsetof(struct afs_cache_vlocation, nservers);
	if (dlen != buflen)
		return FSCACHE_CHECKAUX_OBSOLETE;

	/* the aux data points at the ->nservers member of the full record */
	cvldb = container_of(buffer, struct afs_cache_vlocation, nservers);

	/* if what's on disk is more valid than what's in memory, then use the
	 * VL record from the cache */
	if (!vlocation->valid || vlocation->vldb.rtime == cvldb->rtime) {
		memcpy((uint8_t *)&vlocation->vldb.nservers, buffer, dlen);
		vlocation->valid = 1;
		_leave(" = SUCCESS [c->m]");
		return FSCACHE_CHECKAUX_OKAY;
	}

	/* need to update the cache if the cached info differs; compare the
	 * same tail (from ->nservers) that the aux blob actually holds —
	 * comparing from &vlocation->vldb, as before, matched the record
	 * head against tail data */
	if (memcmp(&vlocation->vldb.nservers, buffer, dlen) != 0) {
		/* delete if the volume IDs for this name differ */
		if (memcmp(&vlocation->vldb.vid, &cvldb->vid,
			   sizeof(cvldb->vid)) != 0
		    ) {
			_leave(" = OBSOLETE");
			return FSCACHE_CHECKAUX_OBSOLETE;
		}

		_leave(" = UPDATE");
		return FSCACHE_CHECKAUX_NEEDS_UPDATE;
	}

	_leave(" = OKAY");
	return FSCACHE_CHECKAUX_OKAY;
}
/*****************************************************************************/
/*
 * set the key for the volume index entry: the volume type only
 */
static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct afs_volume *volume = cookie_netfs_data;
	uint16_t klen;

	_enter("{%u},%p,%u", volume->type, buffer, bufmax);

	klen = sizeof(volume->type);
	if (klen > bufmax)
		return 0;

	memcpy(buffer, &volume->type, sizeof(volume->type));

	_leave(" = %u", klen);
	return klen;
}

/*****************************************************************************/
/*
 * set the key for the index entry: the vnode number
 */
static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data,
					void *buffer, uint16_t bufmax)
{
	const struct afs_vnode *vnode = cookie_netfs_data;
	uint16_t klen;

	_enter("{%x,%x,%llx},%p,%u",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
	       buffer, bufmax);

	klen = sizeof(vnode->fid.vnode);
	if (klen > bufmax)
		return 0;

	memcpy(buffer, &vnode->fid.vnode, sizeof(vnode->fid.vnode));

	_leave(" = %u", klen);
	return klen;
}
/*
 * provide updated file attributes: the current file size
 */
static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
				     uint64_t *size)
{
	const struct afs_vnode *vnode = cookie_netfs_data;

	_enter("{%x,%x,%llx},",
	       vnode->fid.vnode, vnode->fid.unique,
	       vnode->status.data_version);

	*size = vnode->status.size;
}

/*
 * provide new auxiliary cache data: the fid uniquifier followed by the
 * data version; check_aux uses both to spot stale cache objects
 */
static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
					void *buffer, uint16_t bufmax)
{
	const struct afs_vnode *vnode = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%x,%x,%Lx},%p,%u",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
	       buffer, bufmax);

	dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version);
	if (dlen > bufmax)
		return 0;

	memcpy(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique));
	buffer += sizeof(vnode->fid.unique);
	memcpy(buffer, &vnode->status.data_version,
	       sizeof(vnode->status.data_version));

	_leave(" = %u", dlen);
	return dlen;
}
/*
 * check that the auxiliary data indicates that the entry is still valid
 *
 * The aux blob (see afs_vnode_cache_get_aux) is fid.unique followed by
 * status.data_version; a mismatch in either, or in the total length,
 * marks the cached object obsolete.
 */
static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
						       const void *buffer,
						       uint16_t buflen)
{
	struct afs_vnode *vnode = cookie_netfs_data;
	uint16_t dlen;

	_enter("{%x,%x,%llx},%p,%u",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
	       buffer, buflen);

	/* check the size of the data is what we're expecting */
	dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version);
	if (dlen != buflen) {
		_leave(" = OBSOLETE [len %hx != %hx]", dlen, buflen);
		return FSCACHE_CHECKAUX_OBSOLETE;
	}

	/* the uniquifier must match the in-memory vnode */
	if (memcmp(buffer,
		   &vnode->fid.unique,
		   sizeof(vnode->fid.unique)
		   ) != 0) {
		unsigned unique;

		memcpy(&unique, buffer, sizeof(unique));

		_leave(" = OBSOLETE [uniq %x != %x]",
		       unique, vnode->fid.unique);
		return FSCACHE_CHECKAUX_OBSOLETE;
	}

	/* the data version must match too, else the cached data is stale */
	if (memcmp(buffer + sizeof(vnode->fid.unique),
		   &vnode->status.data_version,
		   sizeof(vnode->status.data_version)
		   ) != 0) {
		afs_dataversion_t version;

		memcpy(&version, buffer + sizeof(vnode->fid.unique),
		       sizeof(version));

		_leave(" = OBSOLETE [vers %llx != %llx]",
		       version, vnode->status.data_version);
		return FSCACHE_CHECKAUX_OBSOLETE;
	}

	_leave(" = SUCCESS");
	return FSCACHE_CHECKAUX_OKAY;
}
/*
 * indication that the cookie is no longer cached
 * - this function is called when the backing store currently caching a cookie
 *   is removed
 * - the netfs should use this to clean up any markers indicating cached pages
 * - this is mandatory for any object that may have data
 *
 * Walks the inode's page cache in PAGEVEC_SIZE batches and clears the
 * fscache marker on every page found.
 */
static void afs_vnode_cache_now_uncached(void *cookie_netfs_data)
{
	struct afs_vnode *vnode = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	_enter("{%x,%x,%Lx}",
	       vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version);

	pagevec_init(&pvec, 0);
	first = 0;
	for (;;) {
		/* grab a bunch of pages to clean */
		nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping,
					  first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		/* continue the scan after the last page seen */
		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}

	_leave("");
}
| gpl-2.0 |
mikshepard/android_kernel_samsung_klte | net/netfilter/xt_iprange.c | 13028 | 3869 | /*
* xt_iprange - Netfilter module to match IP address ranges
*
* (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* (C) CC Computer Consultants GmbH, 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_iprange.h>
/*
 * Match an IPv4 packet against the configured source/destination address
 * ranges.  Each enabled check computes "outside the range", applies the
 * optional inversion flag, and rejects the packet on a mismatch.
 */
static bool
iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_iprange_mtinfo *info = par->matchinfo;
	const struct iphdr *iph = ip_hdr(skb);
	bool outside;

	if (info->flags & IPRANGE_SRC) {
		outside = ntohl(iph->saddr) < ntohl(info->src_min.ip) ||
			  ntohl(iph->saddr) > ntohl(info->src_max.ip);
		outside ^= !!(info->flags & IPRANGE_SRC_INV);
		if (outside) {
			pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
				 &iph->saddr,
				 (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
				 &info->src_min.ip,
				 &info->src_max.ip);
			return false;
		}
	}

	if (info->flags & IPRANGE_DST) {
		outside = ntohl(iph->daddr) < ntohl(info->dst_min.ip) ||
			  ntohl(iph->daddr) > ntohl(info->dst_max.ip);
		outside ^= !!(info->flags & IPRANGE_DST_INV);
		if (outside) {
			pr_debug("dst IP %pI4 NOT in range %s%pI4-%pI4\n",
				 &iph->daddr,
				 (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
				 &info->dst_min.ip,
				 &info->dst_max.ip);
			return false;
		}
	}

	return true;
}
/*
 * Big-endian lexicographic comparison of two IPv6 addresses, one 32-bit
 * word at a time.  Returns nonzero iff *a < *b, 0 if equal or greater.
 */
static inline int
iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
{
	unsigned int w;

	for (w = 0; w < 4; ++w)
		if (a->s6_addr32[w] != b->s6_addr32[w])
			return ntohl(a->s6_addr32[w]) < ntohl(b->s6_addr32[w]);

	return 0;
}
/*
 * Match an IPv6 packet against the configured address ranges; same
 * structure as iprange_mt4 but using the 128-bit comparison helper.
 */
static bool
iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_iprange_mtinfo *info = par->matchinfo;
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool m;

	if (info->flags & IPRANGE_SRC) {
		/* m is true iff saddr lies outside [src_min, src_max] */
		m = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
		m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
		m ^= !!(info->flags & IPRANGE_SRC_INV);
		if (m) {
			pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
				 &iph->saddr,
				 (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
				 &info->src_min.in6,
				 &info->src_max.in6);
			return false;
		}
	}
	if (info->flags & IPRANGE_DST) {
		/* same scheme for the destination address */
		m = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
		m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
		m ^= !!(info->flags & IPRANGE_DST_INV);
		if (m) {
			pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
				 &iph->daddr,
				 (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
				 &info->dst_min.in6,
				 &info->dst_max.in6);
			return false;
		}
	}
	return true;
}
/*
 * Match registrations: the same match name and revision for both address
 * families, dispatched to the family-specific handler.
 */
static struct xt_match iprange_mt_reg[] __read_mostly = {
	{
		.name      = "iprange",
		.revision  = 1,
		.family    = NFPROTO_IPV4,
		.match     = iprange_mt4,
		.matchsize = sizeof(struct xt_iprange_mtinfo),
		.me        = THIS_MODULE,
	},
	{
		.name      = "iprange",
		.revision  = 1,
		.family    = NFPROTO_IPV6,
		.match     = iprange_mt6,
		.matchsize = sizeof(struct xt_iprange_mtinfo),
		.me        = THIS_MODULE,
	},
};

/* Register/unregister both family variants together. */
static int __init iprange_mt_init(void)
{
	return xt_register_matches(iprange_mt_reg, ARRAY_SIZE(iprange_mt_reg));
}

static void __exit iprange_mt_exit(void)
{
	xt_unregister_matches(iprange_mt_reg, ARRAY_SIZE(iprange_mt_reg));
}

module_init(iprange_mt_init);
module_exit(iprange_mt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching");
MODULE_ALIAS("ipt_iprange");
MODULE_ALIAS("ip6t_iprange");
| gpl-2.0 |
slayher/android_kernel_samsung_trlte | drivers/misc/uid_stat.c | 229 | 3791 | /* drivers/misc/uid_stat.c
*
* Copyright (C) 2008 - 2009 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/uid_stat.h>
#include <net/activity_stats.h>
/* Protects uid_list and the find-or-create path. */
static DEFINE_SPINLOCK(uid_lock);
/* All known per-uid stat entries, in creation order. */
static LIST_HEAD(uid_list);
/* /proc/uid_stat directory, parent of the per-uid subdirectories. */
static struct proc_dir_entry *parent;

/* Per-uid TCP traffic counters; counters are offset by INT_MIN so the
 * full unsigned range can be tracked (see create_stat). */
struct uid_stat {
	struct list_head link;	/* membership in uid_list */
	uid_t uid;
	atomic_t tcp_rcv;	/* bytes received, offset by INT_MIN */
	atomic_t tcp_snd;	/* bytes sent, offset by INT_MIN */
};
/* Look up the stats entry for @uid; returns NULL if none exists yet.
 * Called with uid_lock held (see find_or_create_uid_stat). */
static struct uid_stat *find_uid_stat(uid_t uid) {
	struct uid_stat *cursor;

	list_for_each_entry(cursor, &uid_list, link) {
		if (cursor->uid == uid)
			return cursor;
	}
	return NULL;
}
/*
 * seq_file show: print a counter rebased from its INT_MIN offset back to
 * an unsigned byte count (counters start at INT_MIN, see create_stat).
 */
static int uid_stat_atomic_int_show(struct seq_file *m, void *v)
{
	unsigned int bytes;
	atomic_t *counter = m->private;

	bytes = (unsigned int) (atomic_read(counter) + INT_MIN);
	return seq_printf(m, "%u\n", bytes);
}

static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA carries the atomic_t installed by create_stat_proc() */
	return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode));
}

/* Read-only seq_file interface for the tcp_snd/tcp_rcv proc files. */
static const struct file_operations uid_stat_read_atomic_int_fops = {
	.open		= uid_stat_read_atomic_int_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Create a new entry for tracking the specified uid and append it to the
 * global list.  Returns NULL on allocation failure. */
static struct uid_stat *create_stat(uid_t uid) {
	struct uid_stat *stat;

	/* Atomic allocation: we are called with uid_lock held. */
	stat = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
	if (stat == NULL)
		return NULL;

	stat->uid = uid;
	/* Counters start at INT_MIN, so we can track 4GB of network traffic. */
	atomic_set(&stat->tcp_rcv, INT_MIN);
	atomic_set(&stat->tcp_snd, INT_MIN);

	list_add_tail(&stat->link, &uid_list);
	return stat;
}
/*
 * Create the /proc/uid_stat/<uid>/ directory with its tcp_snd/tcp_rcv
 * files for a freshly created stats entry.  On proc_mkdir failure the
 * files are not created (previously the NULL parent was passed straight
 * to proc_create_data, which would put the files in the wrong place).
 */
static void create_stat_proc(struct uid_stat *new_uid)
{
	char uid_s[32];
	struct proc_dir_entry *entry;

	snprintf(uid_s, sizeof(uid_s), "%d", new_uid->uid);
	entry = proc_mkdir(uid_s, parent);
	if (!entry) {
		pr_err("uid_stat: failed to create proc dir for uid %d\n",
		       new_uid->uid);
		return;
	}

	/* Keep reference to uid_stat so we know what uid to read stats from. */
	proc_create_data("tcp_snd", S_IRUGO, entry,
			 &uid_stat_read_atomic_int_fops, &new_uid->tcp_snd);
	proc_create_data("tcp_rcv", S_IRUGO, entry,
			 &uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv);
}
/*
 * Return the stats entry for @uid, creating one (and its proc files) on
 * first use.  Returns NULL if allocation fails.
 */
static struct uid_stat *find_or_create_uid_stat(uid_t uid)
{
	struct uid_stat *entry;
	unsigned long flags;

	spin_lock_irqsave(&uid_lock, flags);
	entry = find_uid_stat(uid);
	if (entry) {
		spin_unlock_irqrestore(&uid_lock, flags);
		return entry;
	}
	entry = create_stat(uid);
	spin_unlock_irqrestore(&uid_lock, flags);
	/* proc entry creation is deliberately done outside the spinlock */
	if (entry)
		create_stat_proc(entry);
	return entry;
}
/* Account @size bytes of TCP transmit traffic to @uid.
 * Returns 0, or -1 if a stats entry could not be allocated. */
int uid_stat_tcp_snd(uid_t uid, int size) {
	struct uid_stat *entry;
	activity_stats_update();
	entry = find_or_create_uid_stat(uid);
	if (!entry)
		return -1;
	atomic_add(size, &entry->tcp_snd);
	return 0;
}

/* Account @size bytes of TCP receive traffic to @uid.
 * Returns 0, or -1 if a stats entry could not be allocated. */
int uid_stat_tcp_rcv(uid_t uid, int size) {
	struct uid_stat *entry;
	activity_stats_update();
	entry = find_or_create_uid_stat(uid);
	if (!entry)
		return -1;
	atomic_add(size, &entry->tcp_rcv);
	return 0;
}
/* Create the /proc/uid_stat root directory that all per-uid dirs use. */
static int __init uid_stat_init(void)
{
	parent = proc_mkdir("uid_stat", NULL);
	if (!parent) {
		pr_err("uid_stat: failed to create proc entry\n");
		return -1;
	}
	return 0;
}
__initcall(uid_stat_init);
| gpl-2.0 |
iAMr00t/android_kernel_huawei_msm8916 | drivers/media/radio/radio-iris-transport.c | 485 | 4859 | /*
* Qualcomm's FM Shared Memory Transport Driver
*
* FM HCI_SMD ( FM HCI Shared Memory Driver) is Qualcomm's Shared memory driver
* for the HCI protocol. This file is based on drivers/bluetooth/hci_vhci.c
*
* Copyright (c) 2000-2001, 2011-2012, 2014 The Linux Foundation.
* All rights reserved.
*
* Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
* Copyright (C) 2004-2006 Marcel Holtmann <marcel@holtmann.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <soc/qcom/smd.h>
#include <media/radio-iris.h>
/* Per-transport state: the HCI device, the RX tasklet and SMD channel. */
struct radio_data {
	struct radio_hci_dev *hdev;
	struct tasklet_struct rx_task;
	struct smd_channel *fm_channel;
};
/* Single global instance of the FM SMD transport. */
struct radio_data hs;

/* Work item used to inject a synthetic disable event on channel close. */
static struct work_struct *reset_worker;

/* HCI destruct hook: tear down the registered device. */
static void radio_hci_smd_destruct(struct radio_hci_dev *hdev)
{
	radio_hci_unregister_dev(hs.hdev);
}
/*
 * Tasklet handler: drain all pending FM HCI events from the SMD channel
 * and hand each one to the HCI core as an skb.
 *
 * Fix: the return value of smd_read() was previously ignored; a short
 * or failed read would forward an uninitialized/stale buffer upstream.
 */
static void radio_hci_smd_recv_event(unsigned long temp)
{
	int len;
	int rc;
	struct sk_buff *skb;
	unsigned char *buf;
	struct radio_data *hsmd = &hs;

	len = smd_read_avail(hsmd->fm_channel);

	while (len) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb) {
			FMDERR("Memory not allocated for the socket");
			return;
		}
		buf = kmalloc(len, GFP_ATOMIC);
		if (!buf) {
			kfree_skb(skb);
			FMDERR("Error in allocating buffer memory");
			return;
		}
		rc = smd_read(hsmd->fm_channel, (void *)buf, len);
		if (rc < len) {
			/* short/failed read: don't forward a stale buffer */
			FMDERR("Error in reading from SMD channel");
			kfree(buf);
			kfree_skb(skb);
			return;
		}
		memcpy(skb_put(skb, len), buf, len);
		skb_orphan(skb);
		/* the HCI core only stores this pointer; cast follows the
		 * hci_vhci driver this file is based on */
		skb->dev = (struct net_device *)hs.hdev;
		rc = radio_hci_recv_frame(skb);
		kfree(buf);
		len = smd_read_avail(hsmd->fm_channel);
	}
}
/*
 * Transmit one HCI frame over the SMD channel.  The skb is consumed in
 * every case; -ENODEV is returned when the full frame could not be
 * written.
 */
static int radio_hci_smd_send_frame(struct sk_buff *skb)
{
	int written;

	written = smd_write(hs.fm_channel, skb->data, skb->len);
	if (written < skb->len) {
		FMDERR("Failed to write Data %d", written);
		kfree_skb(skb);
		return -ENODEV;
	}

	kfree_skb(skb);
	return 0;
}
/*
 * Deliver a synthetic HCI event to the core marking the radio disabled;
 * scheduled from the SMD CLOSE notification.  Frees @worker when done.
 */
static void send_disable_event(struct work_struct *worker)
{
	struct sk_buff *skb;
	/* pre-built HCI event payload; NOTE(review): presumably an FM
	 * "disable complete" event - verify against the FM HCI event spec */
	unsigned char buf[6] = { 0x0f, 0x04, 0x01, 0x02, 0x4c, 0x00 };
	int len = sizeof(buf);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		FMDERR("Memory not allocated for the socket");
		kfree(worker);
		return;
	}
	FMDERR("FM INSERT DISABLE Rx Event");
	memcpy(skb_put(skb, len), buf, len);
	skb_orphan(skb);
	skb->dev = (struct net_device *)hs.hdev;
	radio_hci_recv_frame(skb);
	kfree(worker);
}
/*
 * SMD notification callback.
 * DATA:  kick the RX tasklet to drain the channel.
 * CLOSE: schedule a work item that injects a disable event upstream.
 */
static void radio_hci_smd_notify_cmd(void *data, unsigned int event)
{
	struct radio_hci_dev *hdev = hs.hdev;

	if (!hdev) {
		FMDERR("Frame for unknown HCI device (hdev=NULL)");
		return;
	}
	switch (event) {
	case SMD_EVENT_DATA:
		tasklet_schedule(&hs.rx_task);
		break;
	case SMD_EVENT_OPEN:
		break;
	case SMD_EVENT_CLOSE:
		/* atomic allocation: we may be in interrupt context here */
		reset_worker = kzalloc(sizeof(*reset_worker), GFP_ATOMIC);
		if (!reset_worker) {
			FMDERR("Out of memory");
			break;
		}
		INIT_WORK(reset_worker, send_disable_event);
		schedule_work(reset_worker);
		break;
	default:
		break;
	}
}
/*
 * Allocate the FM HCI device, open the SMD command channel and register
 * the device with the HCI core.  On any failure everything allocated
 * here is torn down again and -ENODEV is returned.
 */
static int radio_hci_smd_register_dev(struct radio_data *hsmd)
{
	struct radio_hci_dev *hdev;
	int rc;

	if (hsmd == NULL)
		return -ENODEV;

	hdev = kmalloc(sizeof(struct radio_hci_dev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENODEV;

	hsmd->hdev = hdev;
	tasklet_init(&hsmd->rx_task, radio_hci_smd_recv_event,
		     (unsigned long) hsmd);
	hdev->send = radio_hci_smd_send_frame;
	hdev->destruct = radio_hci_smd_destruct;

	/* Open the SMD Channel and device and register the callback function */
	rc = smd_named_open_on_edge("APPS_FM", SMD_APPS_WCNSS,
				    &hsmd->fm_channel, hdev,
				    radio_hci_smd_notify_cmd);
	if (rc < 0) {
		FMDERR("Cannot open the command channel");
		hsmd->hdev = NULL;
		kfree(hdev);
		return -ENODEV;
	}

	/* events are pulled by the tasklet, so read interrupts are not used */
	smd_disable_read_intr(hsmd->fm_channel);

	if (radio_hci_register_dev(hdev) < 0) {
		FMDERR("Can't register HCI device");
		smd_close(hsmd->fm_channel);
		hsmd->hdev = NULL;
		kfree(hdev);
		return -ENODEV;
	}
	return 0;
}
static void radio_hci_smd_deregister(void)
{
smd_close(hs.fm_channel);
hs.fm_channel = 0;
}
/* Module init: open the SMD channel and register the FM HCI device. */
static int radio_hci_smd_init(void)
{
	return radio_hci_smd_register_dev(&hs);
}
module_init(radio_hci_smd_init);

static void __exit radio_hci_smd_exit(void)
{
	radio_hci_smd_deregister();
}
module_exit(radio_hci_smd_exit);

/* This is the FM (radio) shared-memory transport, not Bluetooth; the old
 * description was copy-pasted from the hci_vhci driver this is based on. */
MODULE_DESCRIPTION("FM SMD driver");
MODULE_AUTHOR("Ankur Nandwani <ankurn@codeaurora.org>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
dirkbehme/linux-renesas-rcar-gen3 | arch/s390/pci/pci_mmio.c | 1253 | 2408 | /*
* Access to PCI I/O memory from user space programs.
*
* Copyright IBM Corp. 2014
* Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
/*
 * Resolve the page frame number backing @user_addr in the current
 * process's address space, after checking that the VMA grants @access
 * (VM_READ or VM_WRITE).  Returns 0 and sets *pfn on success, or a
 * negative errno.
 *
 * Fix: "&current" had been mangled into the HTML entity residue
 * "&curren;t" in both mmap_sem accesses; restored the real code.
 */
static long get_pfn(unsigned long user_addr, unsigned long access,
		    unsigned long *pfn)
{
	struct vm_area_struct *vma;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = -EINVAL;
	vma = find_vma(current->mm, user_addr);
	if (!vma)
		goto out;
	ret = -EACCES;
	if (!(vma->vm_flags & access))
		goto out;
	ret = follow_pfn(vma, user_addr, pfn);
out:
	up_read(&current->mm->mmap_sem);
	return ret;
}
/*
 * s390_pci_mmio_write - copy @length bytes from @user_buffer to the PCI
 * MMIO location mapped at @mmio_addr in the caller's address space.
 * Transfers may not cross a page boundary.  Buffers up to 64 bytes are
 * staged on the stack, larger ones in a temporary kmalloc'd buffer.
 */
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	unsigned long pfn;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;
	/* reject empty writes and page-crossing transfers; note length is
	 * size_t (unsigned), so "<= 0" is effectively "== 0" */
	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;
	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
	if (ret)
		goto out;
	/* rebuild the kernel-visible MMIO address from pfn + page offset */
	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));

	ret = -EFAULT;
	/* addresses below the iomap region cannot be PCI MMIO */
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out;

	if (copy_from_user(buf, user_buffer, length))
		goto out;
	ret = zpci_memcpy_toio(io_addr, buf, length);
out:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}
/*
 * s390_pci_mmio_read - read @length bytes from the PCI MMIO location
 * mapped at @mmio_addr and copy them to @user_buffer.  Mirrors
 * s390_pci_mmio_write: no page-crossing transfers, small reads staged
 * on the stack.
 */
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	unsigned long pfn;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;
	/* reject empty reads and page-crossing transfers */
	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;
	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = get_pfn(mmio_addr, VM_READ, &pfn);
	if (ret)
		goto out;
	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);
	if (ret)
		goto out;
	if (copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;
out:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}
| gpl-2.0 |
AICP/kernel_samsung_tuna | sound/soc/s6000/s6000-pcm.c | 1253 | 14308 | /*
* ALSA PCM interface for the Stetch s6000 family
*
* Author: Daniel Gloeckner, <dg@emlix.com>
* Copyright: (C) 2009 emlix GmbH <info@emlix.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/dma.h>
#include <variant/dmac.h>
#include "s6000-pcm.h"
#define S6_PCM_PREALLOCATE_SIZE (96 * 1024)
#define S6_PCM_PREALLOCATE_MAX (2048 * 1024)
/* Capabilities advertised to ALSA for both directions. */
static struct snd_pcm_hardware s6000_pcm_hardware = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX),
	.formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE),
	.rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_5512 | \
		  SNDRV_PCM_RATE_8000_192000),
	.rate_min = 0,
	.rate_max = 1562500,
	.channels_min = 2,
	.channels_max = 8,
	.buffer_bytes_max = 0x7ffffff0,
	.period_bytes_min = 16,		/* DMA works in 16-byte units */
	.period_bytes_max = 0xfffff0,
	.periods_min = 2,
	.periods_max = 1024,		/* no limit */
	.fifo_size = 0,
};

/* Per-substream DMA bookkeeping, protected by .lock. */
struct s6000_runtime_data {
	spinlock_t lock;
	int period;		/* current DMA period */
};
/*
 * Hand the next ring-buffer period to the DMA engine.  The direction
 * selects the fixed interface address (sif_out/sif_in) and whether the
 * buffer is source or destination.  Called with prtd->lock held (both
 * the trigger and IRQ paths take it).
 */
static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	int channel;
	unsigned int period_size;
	unsigned int dma_offset;
	dma_addr_t dma_pos;
	dma_addr_t src, dst;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	period_size = snd_pcm_lib_period_bytes(substream);
	dma_offset = prtd->period * period_size;
	dma_pos = runtime->dma_addr + dma_offset;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		src = dma_pos;
		dst = par->sif_out;
		channel = par->dma_out;
	} else {
		src = par->sif_in;
		dst = dma_pos;
		channel = par->dma_in;
	}

	if (!s6dmac_channel_enabled(DMA_MASK_DMAC(channel),
				    DMA_INDEX_CHNL(channel)))
		return;

	if (s6dmac_fifo_full(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) {
		printk(KERN_ERR "s6000-pcm: fifo full\n");
		return;
	}

	/* the engine requires 16-byte-aligned transfer sizes */
	BUG_ON(period_size & 15);
	s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel),
			src, dst, period_size);

	/* advance to the next period, wrapping at the buffer end */
	prtd->period++;
	if (unlikely(prtd->period >= runtime->periods))
		prtd->period = 0;
}
/*
 * Shared interrupt handler covering both DMA channels.  For each stream
 * direction: report xruns, advance periods on a "descriptor done"
 * interrupt (and queue the next period), and log DMA error sources.
 *
 * Fixes: the original compared the substream POINTER against the enum
 * value SNDRV_PCM_STREAM_PLAYBACK (0), so the playback iteration always
 * selected dma_in; a NULL substream also reached
 * snd_soc_dai_get_dma_data() unchecked.
 */
static irqreturn_t s6000_pcm_irq(int irq, void *data)
{
	struct snd_pcm *pcm = data;
	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
	struct s6000_runtime_data *prtd;
	unsigned int has_xrun;
	int i, ret = IRQ_NONE;

	for (i = 0; i < 2; ++i) {
		struct snd_pcm_substream *substream = pcm->streams[i].substream;
		struct s6000_pcm_dma_params *params;
		u32 channel;
		unsigned int pending;

		if (!substream)
			continue;

		params = snd_soc_dai_get_dma_data(runtime->cpu_dai, substream);
		/* select the channel by stream direction */
		if (i == SNDRV_PCM_STREAM_PLAYBACK)
			channel = params->dma_out;
		else
			channel = params->dma_in;

		has_xrun = params->check_xrun(runtime->cpu_dai);

		if (!channel)
			continue;

		if (unlikely(has_xrun & (1 << i)) &&
		    substream->runtime &&
		    snd_pcm_running(substream)) {
			dev_dbg(pcm->dev, "xrun\n");
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
			ret = IRQ_HANDLED;
		}

		pending = s6dmac_int_sources(DMA_MASK_DMAC(channel),
					     DMA_INDEX_CHNL(channel));

		if (pending & 1) {
			/* descriptor completed: note the elapsed period and
			 * keep the DMA fed with the next one */
			ret = IRQ_HANDLED;
			if (likely(substream->runtime &&
				   snd_pcm_running(substream))) {
				snd_pcm_period_elapsed(substream);
				dev_dbg(pcm->dev, "period elapsed %x %x\n",
					s6dmac_cur_src(DMA_MASK_DMAC(channel),
						       DMA_INDEX_CHNL(channel)),
					s6dmac_cur_dst(DMA_MASK_DMAC(channel),
						       DMA_INDEX_CHNL(channel)));
				prtd = substream->runtime->private_data;
				spin_lock(&prtd->lock);
				s6000_pcm_enqueue_dma(substream);
				spin_unlock(&prtd->lock);
			}
		}

		if (unlikely(pending & ~7)) {
			if (pending & (1 << 3))
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Underflow\n",
				       channel);
			if (pending & (1 << 4))
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Overflow\n",
				       channel);
			if (pending & 0x1e0)
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Master Error "
				       "(mask %x)\n",
				       channel, pending >> 5);
		}
	}

	return ret;
}
/*
 * Start DMA for a stream: enable the channel (direction decides which
 * side's address increments) and pre-queue two periods so the engine
 * always has a transfer in flight.
 */
static int s6000_pcm_start(struct snd_pcm_substream *substream)
{
	struct s6000_runtime_data *prtd = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	unsigned long flags;
	int srcinc;
	u32 dma;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	spin_lock_irqsave(&prtd->lock, flags);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		srcinc = 1;
		dma = par->dma_out;
	} else {
		srcinc = 0;
		dma = par->dma_in;
	}
	s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma),
			   1 /* priority 1 (0 is max) */,
			   0 /* peripheral requests w/o xfer length mode */,
			   srcinc /* source address increment */,
			   srcinc^1 /* destination address increment */,
			   0 /* chunksize 0 (skip impossible on this dma) */,
			   0 /* source skip after chunk (impossible) */,
			   0 /* destination skip after chunk (impossible) */,
			   4 /* 16 byte burst size */,
			   -1 /* don't conserve bandwidth */,
			   0 /* low watermark irq descriptor threshold */,
			   0 /* disable hardware timestamps */,
			   1 /* enable channel */);

	s6000_pcm_enqueue_dma(substream);
	s6000_pcm_enqueue_dma(substream);
	spin_unlock_irqrestore(&prtd->lock, flags);

	return 0;
}

/*
 * Stop DMA for a stream: zero the terminal count and disable the
 * channel under the runtime lock.
 */
static int s6000_pcm_stop(struct snd_pcm_substream *substream)
{
	struct s6000_runtime_data *prtd = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	unsigned long flags;
	u32 channel;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		channel = par->dma_out;
	else
		channel = par->dma_in;

	s6dmac_set_terminal_count(DMA_MASK_DMAC(channel),
				  DMA_INDEX_CHNL(channel), 0);

	spin_lock_irqsave(&prtd->lock, flags);
	s6dmac_disable_chan(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel));
	spin_unlock_irqrestore(&prtd->lock, flags);

	return 0;
}
/*
 * ALSA trigger callback.  The DAI's own trigger hook is invoked twice
 * (third argument 0 then 1), bracketing the DMA start/stop in between.
 */
static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	int ret;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	ret = par->trigger(substream, cmd, 0);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = s6000_pcm_start(substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = s6000_pcm_stop(substream);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret < 0)
		return ret;

	return par->trigger(substream, cmd, 1);
}

/* Reset the period cursor before (re)starting the stream. */
static int s6000_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct s6000_runtime_data *prtd = substream->runtime->private_data;

	prtd->period = 0;

	return 0;
}
/*
 * Report the current hardware position in the ring buffer, derived from
 * the DMA engine's current source (playback) or destination (capture)
 * address relative to the buffer's DMA base address.
 */
static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd = runtime->private_data;
	unsigned long flags;
	unsigned int offset;
	dma_addr_t count;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	spin_lock_irqsave(&prtd->lock, flags);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		count = s6dmac_cur_src(DMA_MASK_DMAC(par->dma_out),
				       DMA_INDEX_CHNL(par->dma_out));
	else
		count = s6dmac_cur_dst(DMA_MASK_DMAC(par->dma_in),
				       DMA_INDEX_CHNL(par->dma_in));

	count -= runtime->dma_addr;

	spin_unlock_irqrestore(&prtd->lock, flags);

	offset = bytes_to_frames(runtime, count);
	/* clamp transient out-of-range readings to the buffer start */
	if (unlikely(offset >= runtime->buffer_size))
		offset = 0;

	return offset;
}
/*
 * Open callback: install the hardware constraints (16-byte-aligned
 * period and buffer sizes, integer period counts, and - when the DAI
 * requires matching rates - a rate pinned by the opposite direction),
 * then allocate the per-stream runtime data.
 */
static int s6000_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd;
	int ret;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
	snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware);

	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16);
	if (ret < 0)
		return ret;
	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	if (ret < 0)
		return ret;
	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	if (par->same_rate) {
		int rate;

		spin_lock(&par->lock); /* needed? */
		rate = par->rate;
		spin_unlock(&par->lock);
		if (rate != -1) {
			/* the other direction already fixed the rate */
			ret = snd_pcm_hw_constraint_minmax(runtime,
							SNDRV_PCM_HW_PARAM_RATE,
							rate, rate);
			if (ret < 0)
				return ret;
		}
	}

	prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}

/* Close callback: free the per-stream runtime data. */
static int s6000_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd = runtime->private_data;

	kfree(prtd);

	return 0;
}
/*
 * hw_params callback: allocate the DMA buffer and, when both directions
 * must share a rate, claim the rate for this stream or fail with -EBUSY
 * if the other direction already holds a different one.
 */
static int s6000_pcm_hw_params(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *hw_params)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	int ret;

	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	if (ret < 0) {
		printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n");
		return ret;
	}

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	if (par->same_rate) {
		spin_lock(&par->lock);
		if (par->rate == -1 ||
		    !(par->in_use & ~(1 << substream->stream))) {
			/* no conflicting user: (re)define the shared rate */
			par->rate = params_rate(hw_params);
			par->in_use |= 1 << substream->stream;
		} else if (params_rate(hw_params) != par->rate) {
			/* other direction holds a different rate */
			snd_pcm_lib_free_pages(substream);
			par->in_use &= ~(1 << substream->stream);
			ret = -EBUSY;
		}
		spin_unlock(&par->lock);
	}

	return ret;
}

/*
 * hw_free callback: drop this stream's claim on the shared rate and
 * free the DMA buffer.
 */
static int s6000_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par =
		snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	spin_lock(&par->lock);
	par->in_use &= ~(1 << substream->stream);
	if (!par->in_use)
		par->rate = -1;
	spin_unlock(&par->lock);

	return snd_pcm_lib_free_pages(substream);
}
/* PCM operation table shared by playback and capture. */
static struct snd_pcm_ops s6000_pcm_ops = {
	.open =		s6000_pcm_open,
	.close =	s6000_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	s6000_pcm_hw_params,
	.hw_free =	s6000_pcm_hw_free,
	.trigger =	s6000_pcm_trigger,
	.prepare =	s6000_pcm_prepare,
	.pointer =	s6000_pcm_pointer,
};

/* Teardown: release the shared IRQ and any preallocated DMA buffers. */
static void s6000_pcm_free(struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
	struct s6000_pcm_dma_params *params =
		snd_soc_dai_get_dma_data(runtime->cpu_dai,
					 pcm->streams[0].substream);

	free_irq(params->irq, pcm);
	snd_pcm_lib_preallocate_free_for_all(pcm);
}

/* Fallback 32-bit DMA mask for cards that don't provide one. */
static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
/*
 * pcm_new callback: set up DMA masks, quiesce both DMA channels, grab
 * the shared audio IRQ, preallocate DMA buffers, and initialise the
 * shared-rate bookkeeping in the DAI's DMA parameters.
 *
 * Fixes: "&params" had been mangled into the HTML entity residue
 * "&para;ms" in the spin_lock_init call; also dropped the unused local
 * variable 'dai'.
 */
static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
{
	struct snd_card *card = runtime->card->snd_card;
	struct snd_pcm *pcm = runtime->pcm;
	struct s6000_pcm_dma_params *params;
	int res;

	params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
					  pcm->streams[0].substream);

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &s6000_pcm_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (params->dma_in) {
		s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
				    DMA_INDEX_CHNL(params->dma_in));
		/* reading the interrupt sources also clears them */
		s6dmac_int_sources(DMA_MASK_DMAC(params->dma_in),
				   DMA_INDEX_CHNL(params->dma_in));
	}

	if (params->dma_out) {
		s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_out),
				    DMA_INDEX_CHNL(params->dma_out));
		s6dmac_int_sources(DMA_MASK_DMAC(params->dma_out),
				   DMA_INDEX_CHNL(params->dma_out));
	}

	res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED,
			  "s6000-audio", pcm);
	if (res) {
		printk(KERN_ERR "s6000-pcm couldn't get IRQ\n");
		return res;
	}

	res = snd_pcm_lib_preallocate_pages_for_all(pcm,
						    SNDRV_DMA_TYPE_DEV,
						    card->dev,
						    S6_PCM_PREALLOCATE_SIZE,
						    S6_PCM_PREALLOCATE_MAX);
	if (res)
		printk(KERN_WARNING "s6000-pcm: preallocation failed\n");

	spin_lock_init(&params->lock);
	params->in_use = 0;
	params->rate = -1;
	return 0;
}
/* ASoC platform driver descriptor tying the PCM ops and lifecycle hooks together. */
static struct snd_soc_platform_driver s6000_soc_platform = {
	.ops =		&s6000_pcm_ops,
	.pcm_new = 	s6000_pcm_new,
	.pcm_free = 	s6000_pcm_free,
};
/* Platform device probe: register the s6000 ASoC platform. */
static int __devinit s6000_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform);
}
/* Platform device remove: unregister the s6000 ASoC platform. */
static int __devexit s6000_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}
/* Platform driver glue; matched against the "s6000-pcm-audio" device. */
static struct platform_driver s6000_pcm_driver = {
	.driver = {
			.name = "s6000-pcm-audio",
			.owner = THIS_MODULE,
	},

	.probe = s6000_soc_platform_probe,
	.remove = __devexit_p(s6000_soc_platform_remove),
};
/* Module init: register the platform driver with the driver core. */
static int __init snd_s6000_pcm_init(void)
{
	return platform_driver_register(&s6000_pcm_driver);
}
module_init(snd_s6000_pcm_init);
/* Module exit: unregister the platform driver. */
static void __exit snd_s6000_pcm_exit(void)
{
	platform_driver_unregister(&s6000_pcm_driver);
}
module_exit(snd_s6000_pcm_exit);
MODULE_AUTHOR("Daniel Gloeckner");
MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module");
MODULE_LICENSE("GPL");
| gpl-2.0 |
carburano/KingOfBirds_Kernel | arch/arm/kernel/bios32.c | 2533 | 16998 | /*
* linux/arch/arm/kernel/bios32.c
*
* PCI bios-type initialisation for PCI machines
*
* Bits taken from various places.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/mach-types.h>
#include <asm/mach/pci.h>
static int debug_pci;
static int use_firmware;
/*
* We can't use pci_find_device() here since we are
* called from interrupt context.
*/
/*
 * Walk all devices on @bus (and, recursively, on subordinate buses behind
 * bridges), clear the PCI status bits selected by @status_mask and, if
 * @warn is set, print the offending device name and status value.
 */
static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		u16 status;

		/*
		 * ignore host bridge - we handle
		 * that separately
		 */
		if (dev->bus->number == 0 && dev->devfn == 0)
			continue;

		pci_read_config_word(dev, PCI_STATUS, &status);
		/* 0xffff: device not responding to config reads */
		if (status == 0xffff)
			continue;

		if ((status & status_mask) == 0)
			continue;

		/* clear the status errors */
		pci_write_config_word(dev, PCI_STATUS, status & status_mask);

		if (warn)
			printk("(%s: %04X) ", pci_name(dev), status);
	}

	/* second pass: recurse into child buses */
	list_for_each_entry(dev, &bus->devices, bus_list)
		if (dev->subordinate)
			pcibios_bus_report_status(dev->subordinate, status_mask, warn);
}
/*
 * Report and clear the given PCI status bits on every device of every
 * root bus in the system.
 */
void pcibios_report_status(u_int status_mask, int warn)
{
	struct list_head *pos;

	list_for_each(pos, &pci_root_buses)
		pcibios_bus_report_status(pci_bus_b(pos), status_mask, warn);
}
/*
* We don't use this to fix the device, but initialisation of it.
* It's not the correct use for this, but it works.
* Note that the arbiter/ISA bridge appears to be buggy, specifically in
* the following area:
* 1. park on CPU
* 2. ISA bridge ping-pong
* 3. ISA bridge master handling of target RETRY
*
* Bug 3 is responsible for the sound DMA grinding to a halt. We now
* live with bug 2.
*/
/*
 * Fixup for the Winbond W83C553 PCI/ISA bridge: program the base
 * addresses, arbitration and IRQ routing registers the way this platform
 * needs them.  The register offsets (0x40-0x83) are chip-specific; do not
 * reorder these writes without consulting the W83C553 datasheet.
 */
static void __devinit pci_fixup_83c553(struct pci_dev *dev)
{
	/*
	 * Set memory region to start at address 0, and enable IO
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);

	dev->resource[0].end -= dev->resource[0].start;
	dev->resource[0].start = 0;

	/*
	 * All memory requests from ISA to be channelled to PCI
	 */
	pci_write_config_byte(dev, 0x48, 0xff);

	/*
	 * Enable ping-pong on bus master to ISA bridge transactions.
	 * This improves the sound DMA substantially.  The fixed
	 * priority arbiter also helps (see below).
	 */
	pci_write_config_byte(dev, 0x42, 0x01);

	/*
	 * Enable PCI retry
	 */
	pci_write_config_byte(dev, 0x40, 0x22);

	/*
	 * We used to set the arbiter to "park on last master" (bit
	 * 1 set), but unfortunately the CyberPro does not park the
	 * bus.  We must therefore park on CPU.  Unfortunately, this
	 * may trigger yet another bug in the 553.
	 */
	pci_write_config_byte(dev, 0x83, 0x02);

	/*
	 * Make the ISA DMA request lowest priority, and disable
	 * rotating priorities completely.
	 */
	pci_write_config_byte(dev, 0x80, 0x11);
	pci_write_config_byte(dev, 0x81, 0x00);

	/*
	 * Route INTA input to IRQ 11, and set IRQ11 to be level
	 * sensitive.
	 */
	pci_write_config_word(dev, 0x44, 0xb000);
	/* 0x4d1 is the interrupt controller's edge/level control port */
	outb(0x08, 0x4d1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);
/*
 * Rebase the first BAR to zero while keeping its size, so the PCI core
 * will (re)assign it during resource allocation.
 */
static void __devinit pci_fixup_unassign(struct pci_dev *dev)
{
	struct resource *res = &dev->resource[0];

	res->end -= res->start;
	res->start = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);
/*
* Prevent the PCI layer from seeing the resources allocated to this device
* if it is the host bridge by marking it as such. These resources are of
* no consequence to the PCI layer (they are handled elsewhere).
*/
/*
 * Re-class the DEC21285 at devfn 0 as a host bridge and wipe its
 * resources so the PCI core leaves them alone (they are managed
 * elsewhere, see comment above).
 */
static void __devinit pci_fixup_dec21285(struct pci_dev *dev)
{
	int idx;

	if (dev->devfn != 0)
		return;

	dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_HOST << 8);

	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		dev->resource[idx].start = 0;
		dev->resource[idx].end = 0;
		dev->resource[idx].flags = 0;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);
/*
* PCI IDE controllers use non-standard I/O port decoding, respect it.
*/
/*
 * IDE controllers use non-standard I/O port decoding; adjust any BAR
 * decoding at 0x374/0x3f4 so the control block address is reported
 * correctly.
 */
static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
{
	int idx;

	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
		return;

	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		struct resource *res = &dev->resource[idx];

		if ((res->start & ~0x80) == 0x374) {
			res->start |= 2;
			res->end = res->start;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
/*
* Put the DEC21142 to sleep
*/
/* Put the DEC21142 into sleep mode via its CFDD (device/driver area) register. */
static void __devinit pci_fixup_dec21142(struct pci_dev *dev)
{
	pci_write_config_dword(dev, 0x40, 0x80000000);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);
/*
* The CY82C693 needs some rather major fixups to ensure that it does
* the right thing. Idea from the Alpha people, with a few additions.
*
* We ensure that the IDE base registers are set to 1f0/3f4 for the
* primary bus, and 170/374 for the secondary bus. Also, hide them
* from the PCI subsystem view as well so we won't try to perform
* our own auto-configuration on them.
*
* In addition, we ensure that the PCI IDE interrupts are routed to
* IRQ 14 and IRQ 15 respectively.
*
* The above gets us to a point where the IDE on this device is
* functional. However, The CY82C693U _does not work_ in bus
* master mode without locking the PCI bus solid.
*/
/*
 * CY82C693 fixup (see block comment above): pin the IDE BARs to legacy
 * addresses and hide them from the PCI core; on function 0, program IRQ
 * routing and bridge behavior.  Register offsets are chip-specific.
 */
static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u32 base0, base1;

		/* bit 7 of the class/prog-if distinguishes the channels */
		if (dev->class & 0x80) {	/* primary */
			base0 = 0x1f0;
			base1 = 0x3f4;
		} else {			/* secondary */
			base0 = 0x170;
			base1 = 0x374;
		}

		pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
				       base0 | PCI_BASE_ADDRESS_SPACE_IO);
		pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
				       base1 | PCI_BASE_ADDRESS_SPACE_IO);

		/* hide both BARs from the resource allocator */
		dev->resource[0].start = 0;
		dev->resource[0].end   = 0;
		dev->resource[0].flags = 0;

		dev->resource[1].start = 0;
		dev->resource[1].end   = 0;
		dev->resource[1].flags = 0;
	} else if (PCI_FUNC(dev->devfn) == 0) {
		/*
		 * Setup IDE IRQ routing.
		 */
		pci_write_config_byte(dev, 0x4b, 14);
		pci_write_config_byte(dev, 0x4c, 15);

		/*
		 * Disable FREQACK handshake, enable USB.
		 */
		pci_write_config_byte(dev, 0x4d, 0x41);

		/*
		 * Enable PCI retry, and PCI post-write buffer.
		 */
		pci_write_config_byte(dev, 0x44, 0x17);

		/*
		 * Enable ISA master and DMA post write buffering.
		 */
		pci_write_config_byte(dev, 0x45, 0x03);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
/*
 * ITE 8152 fixup: clear all resources on the host bridge and on the two
 * special-class internal devices so the PCI core ignores them.
 */
static void __init pci_fixup_it8152(struct pci_dev *dev)
{
	int idx;

	/* FIXME: add defines for class 0x68000 and 0x80103 */
	if ((dev->class >> 8) != PCI_CLASS_BRIDGE_HOST &&
	    dev->class != 0x68000 &&
	    dev->class != 0x80103)
		return;

	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		dev->resource[idx].start = 0;
		dev->resource[idx].end = 0;
		dev->resource[idx].flags = 0;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);
/* Write the assigned IRQ into the device's interrupt-line config register. */
void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
{
	if (debug_pci)
		printk("PCI: Assigning IRQ %02d to %s\n", irq, pci_name(dev));
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
/*
* If the bus contains any of these devices, then we must not turn on
* parity checking of any kind. Currently this is CyberPro 20x0 only.
*/
/*
 * Devices that break bus-wide parity checking: CyberPro 20x0 and the
 * ITE 8152 bridge.  Returns non-zero if @dev is one of them.
 */
static inline int pdev_bad_for_parity(struct pci_dev *dev)
{
	if (dev->vendor == PCI_VENDOR_ID_INTERG)
		return dev->device == PCI_DEVICE_ID_INTERG_2000 ||
		       dev->device == PCI_DEVICE_ID_INTERG_2010;

	if (dev->vendor == PCI_VENDOR_ID_ITE)
		return dev->device == PCI_DEVICE_ID_ITE_8152;

	return 0;
}
/*
* Adjust the device resources from bus-centric to Linux-centric.
*/
/*
 * Translate every assigned device resource from bus-centric to
 * Linux-centric addresses by adding the root's I/O or memory offset.
 */
static void __devinit
pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];
		resource_size_t delta;

		if (res->start == 0)	/* unassigned: nothing to translate */
			continue;

		delta = (res->flags & IORESOURCE_MEM) ? root->mem_offset
						      : root->io_offset;
		res->start += delta;
		res->end += delta;
	}
}
/*
 * On a root bus (no upstream bridge device) install the platform's
 * bus resources; child buses keep whatever the bridge scan produced.
 */
static void __devinit
pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
{
	int i;

	if (bus->self)
		return;

	for (i = 0; i < 3; i++)
		bus->resource[i] = root->resource[i];
}
/*
* pcibios_fixup_bus - Called after each bus is probed,
* but before its children are examined.
*/
/*
 * Two passes over the bus: first determine which PCI_COMMAND features
 * (fast back-to-back, SERR, parity) every device can support and fix up
 * bridge control registers; then enable the common feature set and set
 * the cache line size on each device.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_sys_data *root = bus->sysdata;
	struct pci_dev *dev;
	u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;

	pbus_assign_bus_resources(bus, root);

	/*
	 * Walk the devices on this bus, working out what we can
	 * and can't support.
	 */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		u16 status;

		pdev_fixup_device_resources(root, dev);

		pci_read_config_word(dev, PCI_STATUS, &status);

		/*
		 * If any device on this bus does not support fast back
		 * to back transfers, then the bus as a whole is not able
		 * to support them.  Having fast back to back transfers
		 * on saves us one PCI cycle per transaction.
		 */
		if (!(status & PCI_STATUS_FAST_BACK))
			features &= ~PCI_COMMAND_FAST_BACK;

		if (pdev_bad_for_parity(dev))
			features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

		/* conservative bridge control settings per bridge type */
		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
			status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
			status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
			pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
			status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
			pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
			break;
		}
	}

	/*
	 * Now walk the devices again, this time setting them up.
	 */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		u16 cmd;

		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		cmd |= features;
		pci_write_config_word(dev, PCI_COMMAND, cmd);

		/* PCI_CACHE_LINE_SIZE is in 32-bit units, hence >> 2 */
		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
				      L1_CACHE_BYTES >> 2);
	}

	/*
	 * Propagate the flags to the PCI bridge.
	 */
	if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		if (features & PCI_COMMAND_FAST_BACK)
			bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
		if (features & PCI_COMMAND_PARITY)
			bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
	}

	/*
	 * Report what we did for this bus
	 */
	printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
		bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
/*
* Convert from Linux-centric to bus-centric addresses for bridge devices.
*/
/*
 * Convert a Linux-centric resource to the bus-centric region seen by
 * devices, subtracting the root's I/O or memory offset.
 */
void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			struct resource *res)
{
	struct pci_sys_data *root = dev->sysdata;
	unsigned long offset;

	if (res->flags & IORESOURCE_MEM)
		offset = root->mem_offset;
	else if (res->flags & IORESOURCE_IO)
		offset = root->io_offset;
	else
		offset = 0;

	region->start = res->start - offset;
	region->end = res->end - offset;
}
/*
 * Convert a bus-centric region back to the Linux-centric resource view,
 * adding the root's I/O or memory offset.
 */
void __devinit
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			struct pci_bus_region *region)
{
	struct pci_sys_data *root = dev->sysdata;
	unsigned long offset;

	if (res->flags & IORESOURCE_MEM)
		offset = root->mem_offset;
	else if (res->flags & IORESOURCE_IO)
		offset = root->io_offset;
	else
		offset = 0;

	res->start = region->start + offset;
	res->end = region->end + offset;
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_fixup_bus);
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif
/*
* Swizzle the device pin each time we cross a bridge.
* This might update pin and returns the slot number.
*/
/*
 * Swizzle the device pin each time we cross a bridge, via the
 * platform-provided hook.  May update *pin; returns the slot number
 * (0 if the platform has no swizzle hook).
 */
static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
{
	struct pci_sys_data *sys = dev->sysdata;
	int oldpin = *pin;
	int slot;

	slot = sys->swizzle ? sys->swizzle(dev, pin) : 0;

	if (debug_pci)
		printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
			pci_name(dev), oldpin, *pin, slot);

	return slot;
}
/*
* Map a slot/pin to an IRQ.
*/
/*
 * Map a slot/pin to an IRQ using the platform hook; -1 means no hook
 * or no mapping.
 */
static int pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pci_sys_data *sys = dev->sysdata;
	int irq;

	irq = sys->map_irq ? sys->map_irq(dev, slot, pin) : -1;

	if (debug_pci)
		printk("PCI: %s mapping slot %d pin %d => irq %d\n",
			pci_name(dev), slot, pin, irq);

	return irq;
}
/*
 * Set up every PCI controller described by @hw: allocate and fill a
 * pci_sys_data for each, call the platform setup hook and, on success,
 * scan the bus.  Bus numbers are allocated sequentially across
 * controllers.  A setup return of 0 skips the controller; a negative
 * return aborts the loop.
 */
static void __init pcibios_init_hw(struct hw_pci *hw)
{
	struct pci_sys_data *sys = NULL;
	int ret;
	int nr, busnr;

	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
		sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
		if (!sys)
			panic("PCI: unable to allocate sys data!");

#ifdef CONFIG_PCI_DOMAINS
		sys->domain  = hw->domain;
#endif
		sys->hw      = hw;
		sys->busnr   = busnr;
		sys->swizzle = hw->swizzle;
		sys->map_irq = hw->map_irq;
		/* default root resources; setup() may override them */
		sys->resource[0] = &ioport_resource;
		sys->resource[1] = &iomem_resource;

		ret = hw->setup(nr, sys);

		if (ret > 0) {
			sys->bus = hw->scan(nr, sys);

			if (!sys->bus)
				panic("PCI: unable to scan bus!");

			/* next controller starts after this one's last bus */
			busnr = sys->bus->subordinate + 1;

			list_add(&sys->node, &hw->buses);
		} else {
			kfree(sys);
			if (ret < 0)
				break;
		}
	}
}
/*
 * Common PCI bring-up entry point for ARM platforms: initialize all
 * controllers, fix up IRQs, then (unless the "firmware" boot option
 * asked us to trust firmware-assigned resources) size, assign and
 * enable bridge windows before announcing the devices to drivers.
 */
void __init pci_common_init(struct hw_pci *hw)
{
	struct pci_sys_data *sys;

	INIT_LIST_HEAD(&hw->buses);

	if (hw->preinit)
		hw->preinit();
	pcibios_init_hw(hw);
	if (hw->postinit)
		hw->postinit();

	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);

	list_for_each_entry(sys, &hw->buses, node) {
		struct pci_bus *bus = sys->bus;

		if (!use_firmware) {
			/*
			 * Size the bridge windows.
			 */
			pci_bus_size_bridges(bus);

			/*
			 * Assign resources.
			 */
			pci_bus_assign_resources(bus);

			/*
			 * Enable bridges
			 */
			pci_enable_bridges(bus);
		}

		/*
		 * Tell drivers about devices found.
		 */
		pci_bus_add_devices(bus);
	}
}
/*
 * Parse "pci=" boot options understood by this platform: "debug" turns
 * on verbose messages, "firmware" keeps firmware resource assignments.
 * Unrecognized options are returned for generic handling.
 */
char * __init pcibios_setup(char *str)
{
	if (strcmp(str, "debug") == 0) {
		debug_pci = 1;
		return NULL;
	}

	if (strcmp(str, "firmware") == 0) {
		use_firmware = 1;
		return NULL;
	}

	return str;
}
/*
* From arch/i386/kernel/pci-i386.c:
*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff..
*/
/*
 * Align a resource allocation, additionally steering I/O allocations
 * out of the 0x100-0x3ff region (mirrored by cards that decode only the
 * low 10 address bits - see comment above).
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	resource_size_t start = res->start;

	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
		start = (start + 0x3ff) & ~0x3ff;

	return (start + align - 1) & ~(align - 1);
}
/**
* pcibios_enable_device - Enable I/O and memory.
* @dev: PCI device to be enabled
*/
/**
 * pcibios_enable_device - Enable I/O and memory.
 * @dev: PCI device to be enabled
 * @mask: bitmap of BARs (0-5) the caller wants enabled
 *
 * Turns on PCI_COMMAND_IO/PCI_COMMAND_MEMORY as required by the
 * requested BARs.  Fails with -EINVAL if a requested BAR has a size but
 * no assigned base (resource collision).
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	/* only the 6 standard BARs are considered here */
	for (idx = 0; idx < 6; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1 << idx)))
			continue;

		r = dev->resource + idx;
		if (!r->start && r->end) {
			printk(KERN_ERR "PCI: Device %s not available because"
			       " of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	/*
	 * Bridges (eg, cardbus bridges) need to be fully enabled
	 */
	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
		cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;

	if (cmd != old_cmd) {
		printk("PCI: enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
/*
 * mmap() support for PCI resources.  Only memory space can be mapped on
 * this platform; the mapping is made uncached.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	struct pci_sys_data *root = dev->sysdata;
	unsigned long phys;

	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);

	/*
	 * Mark this as IO
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, phys,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot) ? -EAGAIN : 0;
}
| gpl-2.0 |
jamieg71/android_kernel_lge_hammerhead | net/mac80211/rc80211_minstrel_ht.c | 4837 | 23838 | /*
* Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ieee80211.h>
#include <net/mac80211.h>
#include "rate.h"
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
#define AVG_PKT_SIZE 1200
#define SAMPLE_COLUMNS 10
#define EWMA_LEVEL 75
/* Number of bits for an average sized packet */
#define MCS_NBITS (AVG_PKT_SIZE << 3)
/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps))
/* Transmission time for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms) \
(sgi ? \
((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \
(syms) << 2 /* syms * 4 us */ \
)
/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
/*
* Define group sort order: HT40 -> SGI -> #streams
*/
#define GROUP_IDX(_streams, _sgi, _ht40) \
MINSTREL_MAX_STREAMS * 2 * _ht40 + \
MINSTREL_MAX_STREAMS * _sgi + \
_streams - 1
/* MCS rate information for an MCS group */
#define MCS_GROUP(_streams, _sgi, _ht40) \
[GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
.flags = \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \
MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \
MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \
MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \
MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \
MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \
MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \
MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \
} \
}
/*
* To enable sufficiently targeted rate sampling, MCS rates are divided into
* groups, based on the number of streams and flags (HT40, SGI) that they
* use.
*
* Sortorder has to be fixed for GROUP_IDX macro to be applicable:
* HT40 -> SGI -> #streams
*/
/* Table index order must match GROUP_IDX(): HT40 -> SGI -> #streams. */
const struct mcs_group minstrel_mcs_groups[] = {
	MCS_GROUP(1, 0, 0),
	MCS_GROUP(2, 0, 0),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 0, 0),
#endif

	MCS_GROUP(1, 1, 0),
	MCS_GROUP(2, 1, 0),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 1, 0),
#endif

	MCS_GROUP(1, 0, 1),
	MCS_GROUP(2, 0, 1),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 0, 1),
#endif

	MCS_GROUP(1, 1, 1),
	MCS_GROUP(2, 1, 1),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 1, 1),
#endif
};
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
/*
* Perform EWMA (Exponentially Weighted Moving Average) calculation
*/
/*
 * Exponentially weighted moving average: blend a new sample into the old
 * value, keeping 'weight' percent (0-100) of the old value.
 */
static int
minstrel_ewma(int old, int new, int weight)
{
	int blended = new * (100 - weight) + old * weight;

	return blended / 100;
}
/*
* Look up an MCS group index based on mac80211 rate information
*/
/*
 * Look up an MCS group index from a mac80211 tx rate: the stream count is
 * derived from the MCS index (8 rates per stream), SGI and HT40 from the
 * rate flags.
 */
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
	return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
			 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
			 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
/* Map a flat rate index (group * MCS_GROUP_RATES + rate) to its stats. */
static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
{
	return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
}
/*
* Recalculate success probabilities and counters for a rate using EWMA
*/
/*
 * Fold the current sampling window into the EWMA success probability of
 * one rate and reset the per-window counters.
 */
static void
minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
{
	if (mr->attempts == 0) {
		mr->sample_skipped++;
	} else {
		mr->sample_skipped = 0;
		mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
		/* first window: seed the average instead of blending */
		mr->probability = mr->att_hist ?
			minstrel_ewma(mr->probability, mr->cur_prob, EWMA_LEVEL) :
			mr->cur_prob;
		mr->att_hist += mr->attempts;
		mr->succ_hist += mr->success;
	}

	mr->last_success = mr->success;
	mr->last_attempts = mr->attempts;
	mr->success = 0;
	mr->attempts = 0;
}
/*
* Calculate throughput based on the average A-MPDU length, taking into account
* the expected number of retransmissions and their expected length
*/
/*
 * Recompute the expected throughput of one rate from its airtime per
 * average packet and its EWMA success probability.
 */
static void
minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
{
	struct minstrel_rate_stats *mr = &mi->groups[group].rates[rate];
	unsigned int airtime;

	/* below 10% delivery the rate carries no usable throughput */
	if (mr->probability < MINSTREL_FRAC(1, 10)) {
		mr->cur_tp = 0;
		return;
	}

	airtime = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len) +
		  minstrel_mcs_groups[group].duration[rate];
	mr->cur_tp = MINSTREL_TRUNC((1000000 / airtime) * mr->probability);
}
/*
* Update rate statistics and select new primary rates
*
* Rules for rate selection:
* - max_prob_rate must use only one stream, as a tradeoff between delivery
* probability and throughput during strong fluctuations
* - as long as the max prob rate has a probability of more than 3/4, pick
* higher throughput rates, even if the probablity is a bit lower
*/
/*
 * Periodic statistics update (see the rate selection rules in the comment
 * above): refresh per-rate EWMA stats and throughput, pick per-group and
 * global max throughput / max probability rates, and reset the sampling
 * budget for the next interval.
 */
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int cur_prob, cur_prob_tp, cur_tp, cur_tp2;
	int group, i, index;

	/* fold the A-MPDU length samples into the running average */
	if (mi->ampdu_packets > 0) {
		mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
			MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
		mi->ampdu_len = 0;
		mi->ampdu_packets = 0;
	}

	mi->sample_slow = 0;
	mi->sample_count = 0;
	mi->max_tp_rate = 0;
	mi->max_tp_rate2 = 0;
	mi->max_prob_rate = 0;

	/* pass 1: per-group stats refresh and per-group rate selection */
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		cur_prob = 0;
		cur_prob_tp = 0;
		cur_tp = 0;
		cur_tp2 = 0;
		mg = &mi->groups[group];
		if (!mg->supported)
			continue;

		mg->max_tp_rate = 0;
		mg->max_tp_rate2 = 0;
		mg->max_prob_rate = 0;
		mi->sample_count++;

		for (i = 0; i < MCS_GROUP_RATES; i++) {
			if (!(mg->supported & BIT(i)))
				continue;

			mr = &mg->rates[i];
			mr->retry_updated = false;
			index = MCS_GROUP_RATES * group + i;
			minstrel_calc_rate_ewma(mr);
			minstrel_ht_calc_tp(mi, group, i);

			if (!mr->cur_tp)
				continue;

			/* ignore the lowest rate of each single-stream group */
			if (!i && minstrel_mcs_groups[group].streams == 1)
				continue;

			/* prefer higher-tp candidates once prob > 3/4 */
			if ((mr->cur_tp > cur_prob_tp && mr->probability >
			     MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
				mg->max_prob_rate = index;
				cur_prob = mr->probability;
				cur_prob_tp = mr->cur_tp;
			}

			/*
			 * New best tp rate: swap() demotes the previous
			 * winner into 'index'/'mr' so it can still compete
			 * for the runner-up slot below.
			 */
			if (mr->cur_tp > cur_tp) {
				swap(index, mg->max_tp_rate);
				cur_tp = mr->cur_tp;
				mr = minstrel_get_ratestats(mi, index);
			}

			if (index >= mg->max_tp_rate)
				continue;

			if (mr->cur_tp > cur_tp2) {
				mg->max_tp_rate2 = index;
				cur_tp2 = mr->cur_tp;
			}
		}
	}

	/* try to sample up to half of the available rates during each interval */
	mi->sample_count *= 4;

	/* pass 2: pick the global winners across all groups */
	cur_prob = 0;
	cur_prob_tp = 0;
	cur_tp = 0;
	cur_tp2 = 0;
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		mg = &mi->groups[group];
		if (!mg->supported)
			continue;

		/* max_prob_rate is restricted to single-stream groups */
		mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
		if (cur_prob_tp < mr->cur_tp &&
		    minstrel_mcs_groups[group].streams == 1) {
			mi->max_prob_rate = mg->max_prob_rate;
			cur_prob = mr->cur_prob;
			cur_prob_tp = mr->cur_tp;
		}

		mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
		if (cur_tp < mr->cur_tp) {
			mi->max_tp_rate2 = mi->max_tp_rate;
			cur_tp2 = cur_tp;
			mi->max_tp_rate = mg->max_tp_rate;
			cur_tp = mr->cur_tp;
		}

		mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
		if (cur_tp2 < mr->cur_tp) {
			mi->max_tp_rate2 = mg->max_tp_rate2;
			cur_tp2 = mr->cur_tp;
		}
	}

	mi->stats_update = jiffies;
}
/*
 * A tx status rate entry is usable only if it was actually tried
 * (valid index, non-zero count) and is an MCS rate.
 */
static bool
minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
{
	return rate->idx >= 0 && rate->count &&
	       (rate->flags & IEEE80211_TX_RC_MCS);
}
/*
 * Advance the sampling cursor to the next supported MCS group; for the
 * group that is landed on, advance its rate index and wrap into the next
 * sample-table column when a full cycle completes.
 */
static void
minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;

	for (;;) {
		mi->sample_group++;
		mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
		mg = &mi->groups[mi->sample_group];
		/* skip groups the peer does not support */
		if (!mg->supported)
			continue;

		if (++mg->index >= MCS_GROUP_RATES) {
			mg->index = 0;
			if (++mg->column >= ARRAY_SIZE(sample_table))
				mg->column = 0;
		}
		break;
	}
}
/*
 * Replace *idx with the best (or second best, for !primary) throughput
 * rate of the nearest lower supported group that does not use more
 * streams than the current group.  Used when spatial multiplexing
 * suddenly stops working.
 */
static void
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
			bool primary)
{
	int orig_group = *idx / MCS_GROUP_RATES;
	int group;

	for (group = orig_group - 1; group >= 0; group--) {
		if (!mi->groups[group].supported)
			continue;

		if (minstrel_mcs_groups[group].streams >
		    minstrel_mcs_groups[orig_group].streams)
			continue;

		*idx = primary ? mi->groups[group].max_tp_rate :
				 mi->groups[group].max_tp_rate2;
		break;
	}
}
/*
 * Opportunistically start an A-MPDU (block ack) session for the TID of an
 * outgoing QoS data frame if none is active yet.  EAPOL frames and frames
 * on the voice queue are left alone.
 */
static void
minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	u16 tid;

	if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
		return;

	/* never aggregate authentication (EAPOL) traffic */
	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	/* a session for this TID is already being set up or running */
	if (likely(sta->ampdu_mlme.tid_tx[tid]))
		return;

	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	/* 5000 = session timeout in TUs */
	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
}
/*
 * tx_status callback: account the attempted/successful transmissions of
 * each tried rate into the minstrel statistics, manage the sampling
 * budget, downgrade the max-tp rates if spatial multiplexing collapses,
 * and trigger the periodic stats update.  Falls back to legacy minstrel
 * for non-HT peers.
 */
static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
                      struct ieee80211_sta *sta, void *priv_sta,
                      struct sk_buff *skb)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *ar = info->status.rates;
	struct minstrel_rate_stats *rate, *rate2;
	struct minstrel_priv *mp = priv;
	bool last = false;
	int group;
	int i = 0;

	if (!msp->is_ht)
		return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/* treat a non-aggregated frame as an A-MPDU of length 1 */
	if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
		info->status.ampdu_ack_len =
			(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
		info->status.ampdu_len = 1;
	}

	mi->ampdu_packets++;
	mi->ampdu_len += info->status.ampdu_len;

	/* replenish the sampling budget when it has been used up */
	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
		mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
		mi->sample_tries = 2;
		mi->sample_count--;
	}

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		mi->sample_packets += info->status.ampdu_len;

	/* credit attempts to every tried rate, success only to the last */
	for (i = 0; !last; i++) {
		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
		       !minstrel_ht_txstat_valid(&ar[i + 1]);

		if (!minstrel_ht_txstat_valid(&ar[i]))
			break;

		group = minstrel_ht_get_group_idx(&ar[i]);
		rate = &mi->groups[group].rates[ar[i].idx % 8];

		if (last)
			rate->success += info->status.ampdu_ack_len;

		rate->attempts += ar[i].count * info->status.ampdu_len;
	}

	/*
	 * check for sudden death of spatial multiplexing,
	 * downgrade to a lower number of streams if necessary.
	 */
	rate = minstrel_get_ratestats(mi, mi->max_tp_rate);
	if (rate->attempts > 30 &&
	    MINSTREL_FRAC(rate->success, rate->attempts) <
	    MINSTREL_FRAC(20, 100))
		minstrel_downgrade_rate(mi, &mi->max_tp_rate, true);

	rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2);
	if (rate2->attempts > 30 &&
	    MINSTREL_FRAC(rate2->success, rate2->attempts) <
	    MINSTREL_FRAC(20, 100))
		minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false);

	if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
		minstrel_ht_update_stats(mp, mi);
		if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
			minstrel_aggr_check(sta, skb);
	}
}
/*
 * Compute the retry counts (with and without RTS/CTS) for one rate:
 * accumulate per-try airtime (contention + overhead + data) until the
 * configured tx segment size is exceeded or max_retry is reached.
 * Rates with under 10% delivery probability get a single try.
 */
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                         int index)
{
	struct minstrel_rate_stats *mr;
	const struct mcs_group *group;
	unsigned int tx_time, tx_time_rtscts, tx_time_data;
	unsigned int cw = mp->cw_min;
	unsigned int ctime = 0;
	unsigned int t_slot = 9; /* FIXME */
	unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);

	mr = minstrel_get_ratestats(mi, index);
	if (mr->probability < MINSTREL_FRAC(1, 10)) {
		mr->retry_count = 1;
		mr->retry_count_rtscts = 1;
		return;
	}

	mr->retry_count = 2;
	mr->retry_count_rtscts = 2;
	mr->retry_updated = true;

	group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;

	/* Contention time for first 2 tries */
	ctime = (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);
	ctime += (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);

	/* Total TX time for data and Contention after first 2 tries */
	tx_time = ctime + 2 * (mi->overhead + tx_time_data);
	tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data);

	/* See how many more tries we can fit inside segment size */
	do {
		/* Contention time for this try */
		ctime = (t_slot * cw) >> 1;
		cw = min((cw << 1) | 1, mp->cw_max);

		/* Total TX time after this try */
		tx_time += ctime + mi->overhead + tx_time_data;
		tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data;

		if (tx_time_rtscts < mp->segment_size)
			mr->retry_count_rtscts++;
	} while ((tx_time < mp->segment_size) &&
	         (++mr->retry_count < mp->max_retry));
}
/*
 * Fill an ieee80211_tx_rate entry for the given flat rate index: choose
 * the retry count (single try for sampling, short retry for weak rates,
 * otherwise the precomputed budget) and translate the index into the
 * mac80211 MCS number and rate flags.
 */
static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                     struct ieee80211_tx_rate *rate, int index,
                     bool sample, bool rtscts)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	struct minstrel_rate_stats *mr = minstrel_get_ratestats(mi, index);

	if (!mr->retry_updated)
		minstrel_calc_retransmit(mp, mi, index);

	if (sample)
		rate->count = 1;
	else if (mr->probability < MINSTREL_FRAC(20, 100))
		rate->count = 2;
	else
		rate->count = rtscts ? mr->retry_count_rtscts : mr->retry_count;

	rate->flags = IEEE80211_TX_RC_MCS | group->flags;
	if (rtscts)
		rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
	rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
}
/* Airtime of an average-sized packet at the given flat rate index. */
static inline int
minstrel_get_duration(int index)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	return group->duration[index % MCS_GROUP_RATES];
}
/*
 * Decide whether to probe an alternative rate for this frame, and if so
 * which one.  Returns a flat rate index, or -1 to transmit at the normal
 * rate (budget exhausted, rate not worth sampling, etc.).
 */
static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_rate_stats *mr;
	struct minstrel_mcs_group_data *mg;
	int sample_idx = 0;

	/* still in the mandatory wait between sampling bursts */
	if (mi->sample_wait > 0) {
		mi->sample_wait--;
		return -1;
	}

	if (!mi->sample_tries)
		return -1;

	mi->sample_tries--;
	mg = &mi->groups[mi->sample_group];
	sample_idx = sample_table[mg->column][mg->index];
	mr = &mg->rates[sample_idx];
	sample_idx += mi->sample_group * MCS_GROUP_RATES;
	minstrel_next_sample_idx(mi);

	/*
	 * Sampling might add some overhead (RTS, no aggregation)
	 * to the frame. Hence, don't use sampling for the currently
	 * used max TP rate.
	 */
	if (sample_idx == mi->max_tp_rate)
		return -1;

	/*
	 * When not using MRR, do not sample if the probability is already
	 * higher than 95% to avoid wasting airtime
	 */
	if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100)))
		return -1;

	/*
	 * Make sure that lower rates get sampled only occasionally,
	 * if the link is working perfectly.
	 */
	if (minstrel_get_duration(sample_idx) >
	    minstrel_get_duration(mi->max_tp_rate)) {
		if (mr->sample_skipped < 20)
			return -1;

		if (mi->sample_slow++ > 2)
			return -1;
	}

	return sample_idx;
}
/*
 * rate_control_ops.get_rate: fill the multi-rate-retry (MRR) chain in the
 * skb's tx info.  Falls through to legacy minstrel for non-HT stations.
 */
static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
		     struct ieee80211_tx_rate_control *txrc)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
	struct ieee80211_tx_rate *ar = info->status.rates;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct minstrel_priv *mp = priv;
	int sample_idx;
	bool sample = false;

	/* Management/low-rate frames are handled by the core helper */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	/* Non-HT peers use the legacy minstrel algorithm instead */
	if (!msp->is_ht)
		return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);

	info->flags |= mi->tx_flags;

	/* Don't use EAPOL frames for sampling on non-mrr hw */
	if (mp->hw->max_rates == 1 &&
	    txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
		sample_idx = -1;
	else
		sample_idx = minstrel_get_sample_rate(mp, mi);

#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1)
		sample_idx = mp->fixed_rate_idx;
#endif

	/* First MRR slot: either the sampling rate or the best-TP rate */
	if (sample_idx >= 0) {
		sample = true;
		minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
			true, false);
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
	} else {
		minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
			false, false);
	}

	if (mp->hw->max_rates >= 3) {
		/*
		 * At least 3 tx rates supported, use
		 * sample_rate -> max_tp_rate -> max_prob_rate for sampling and
		 * max_tp_rate -> max_tp_rate2 -> max_prob_rate by default.
		 */
		if (sample_idx >= 0)
			minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
				false, false);
		else
			minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
				false, true);

		minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
				     false, !sample);

		/* Terminate the chain */
		ar[3].count = 0;
		ar[3].idx = -1;
	} else if (mp->hw->max_rates == 2) {
		/*
		 * Only 2 tx rates supported, use
		 * sample_rate -> max_prob_rate for sampling and
		 * max_tp_rate -> max_prob_rate by default.
		 */
		minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
				     false, !sample);

		ar[2].count = 0;
		ar[2].idx = -1;
	} else {
		/* Not using MRR, only use the first rate */
		ar[1].count = 0;
		ar[1].idx = -1;
	}

	mi->total_packets++;

	/* wraparound */
	if (mi->total_packets == ~0) {
		mi->total_packets = 0;
		mi->sample_packets = 0;
	}
}
/*
 * (Re-)derive per-station HT rate-control state from the peer's HT
 * capabilities and the current channel type.  Falls back to legacy
 * minstrel when the station is not HT or supports no usable MCS group.
 */
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
                        struct ieee80211_sta *sta, void *priv_sta,
			enum nl80211_channel_type oper_chan_type)
{
	struct minstrel_priv *mp = priv;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
	struct ieee80211_local *local = hw_to_local(mp->hw);
	u16 sta_cap = sta->ht_cap.cap;
	int n_supported = 0;
	int ack_dur;
	int stbc;
	int i;
	unsigned int smps;

	/* fall back to the old minstrel for legacy stations */
	if (!sta->ht_cap.ht_supported)
		goto use_legacy;

	BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
		MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS);

	msp->is_ht = true;
	memset(mi, 0, sizeof(*mi));
	mi->stats_update = jiffies;

	/* Precompute per-frame overhead (with and without RTS/CTS) */
	ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1);
	mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur;
	mi->overhead_rtscts = mi->overhead + 2 * ack_dur;

	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);

	/* When using MRR, sample more on the first attempt, without delay */
	if (mp->has_mrr) {
		mi->sample_count = 16;
		mi->sample_wait = 0;
	} else {
		mi->sample_count = 8;
		mi->sample_wait = 8;
	}
	mi->sample_tries = 4;

	/* Propagate the peer's RX STBC capability into our TX flags */
	stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
		IEEE80211_HT_CAP_RX_STBC_SHIFT;
	mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;

	if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
		mi->tx_flags |= IEEE80211_TX_CTL_LDPC;

	/* 40MHz rates are only usable when operating on a 40MHz channel */
	if (oper_chan_type != NL80211_CHAN_HT40MINUS &&
	    oper_chan_type != NL80211_CHAN_HT40PLUS)
		sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;

	smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
		IEEE80211_HT_CAP_SM_PS_SHIFT;

	/* Enable each MCS group the peer's capability bits allow */
	for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
		u16 req = 0;

		mi->groups[i].supported = 0;
		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
			if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				req |= IEEE80211_HT_CAP_SGI_40;
			else
				req |= IEEE80211_HT_CAP_SGI_20;
		}

		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;

		if ((sta_cap & req) != req)
			continue;

		/* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
		if (smps == WLAN_HT_CAP_SM_PS_STATIC &&
		    minstrel_mcs_groups[i].streams > 1)
			continue;

		mi->groups[i].supported =
			mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];

		if (mi->groups[i].supported)
			n_supported++;
	}

	if (!n_supported)
		goto use_legacy;

	return;

use_legacy:
	/* Hand the station over to the legacy minstrel implementation */
	msp->is_ht = false;
	memset(&msp->legacy, 0, sizeof(msp->legacy));
	msp->legacy.r = msp->ratelist;
	msp->legacy.sample_table = msp->sample_table;
	return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
}
/* rate_control_ops.rate_init: initial setup using the hw's current channel type */
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
                      struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_priv *mp = priv;

	minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
}
/* rate_control_ops.rate_update: re-derive state for the new channel type */
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
                        struct ieee80211_sta *sta, void *priv_sta,
                        u32 changed, enum nl80211_channel_type oper_chan_type)
{
	minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type);
}
/*
 * Allocate per-station rate-control state.
 *
 * The container is a struct minstrel_ht_sta_priv, which embeds both the
 * HT state (msp->ht) and the legacy state (msp->legacy) plus the
 * dynamically sized legacy rate list and sample table used when the
 * station turns out not to be HT capable.
 */
static void *
minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct ieee80211_supported_band *sband;
	struct minstrel_ht_sta_priv *msp;
	struct minstrel_priv *mp = priv;
	struct ieee80211_hw *hw = mp->hw;
	int max_rates = 0;
	int i;

	/* Size the legacy rate list for the largest supported band */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	/*
	 * BUG FIX: the original allocated sizeof(struct minstrel_ht_sta),
	 * but msp points to the larger struct minstrel_ht_sta_priv.  The
	 * undersized allocation corrupts the heap as soon as members
	 * beyond the embedded HT state (ratelist, sample_table, is_ht)
	 * are written.  sizeof(*msp) ties the size to the pointer's type.
	 */
	msp = kzalloc(sizeof(*msp), gfp);
	if (!msp)
		return NULL;

	/* kcalloc() also checks the n * size multiplication for overflow */
	msp->ratelist = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
	if (!msp->ratelist)
		goto error;

	msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
	if (!msp->sample_table)
		goto error1;

	return msp;

error1:
	kfree(msp->ratelist);
error:
	kfree(msp);
	return NULL;
}
/* Release everything minstrel_ht_alloc_sta() allocated for this station. */
static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;

	/* Free the legacy-minstrel buffers, then the container itself */
	kfree(msp->ratelist);
	kfree(msp->sample_table);
	kfree(msp);
}
/* Per-hw private data is shared with (and created by) legacy minstrel */
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return mac80211_minstrel.alloc(hw, debugfsdir);
}
/* Counterpart of minstrel_ht_alloc(): delegate teardown to legacy minstrel */
static void
minstrel_ht_free(void *priv)
{
	mac80211_minstrel.free(priv);
}
/* Rate-control ops registered with mac80211 under the name "minstrel_ht" */
static struct rate_control_ops mac80211_minstrel_ht = {
	.name = "minstrel_ht",
	.tx_status = minstrel_ht_tx_status,
	.get_rate = minstrel_ht_get_rate,
	.rate_init = minstrel_ht_rate_init,
	.rate_update = minstrel_ht_rate_update,
	.alloc_sta = minstrel_ht_alloc_sta,
	.free_sta = minstrel_ht_free_sta,
	.alloc = minstrel_ht_alloc,
	.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
	.remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
#endif
};
/*
 * Build the sample table: each column is a random permutation of
 * 0..MCS_GROUP_RATES-1, placed with random offsets and linear probing.
 */
static void
init_sample_table(void)
{
	int c, r, idx;
	u8 rnd[MCS_GROUP_RATES];

	/* 0xff marks an unoccupied slot during table construction */
	memset(sample_table, 0xff, sizeof(sample_table));
	for (c = 0; c < SAMPLE_COLUMNS; c++) {
		for (r = 0; r < MCS_GROUP_RATES; r++) {
			get_random_bytes(rnd, sizeof(rnd));
			/* Random starting slot, probe forward until free */
			idx = (r + rnd[r]) % MCS_GROUP_RATES;
			while (sample_table[c][idx] != 0xff)
				idx = (idx + 1) % MCS_GROUP_RATES;
			sample_table[c][idx] = r;
		}
	}
}
/* Module init: build the sampling table, then register with mac80211 */
int __init
rc80211_minstrel_ht_init(void)
{
	init_sample_table();
	return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}
/* Module exit: unregister the rate-control algorithm */
void
rc80211_minstrel_ht_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}
| gpl-2.0 |
cmotc/android_kernel_samsung_schS738c | fs/quota/quota.c | 4837 | 9809 | /*
* Quota code necessary even when VFS quota support is not compiled
* into the kernel. The interesting stuff is over in dquot.c, here
* we have symbols for initial quotactl(2) handling, the sysctl(2)
* variables, etc - things needed even when quota support disabled.
*/
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
/*
 * Check whether the caller may issue quotactl command @cmd for quota
 * @type / @id on @sb, then let the LSM have the final word.
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privileges */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		if ((type == USRQUOTA && current_euid() == id) ||
		    (type == GRPQUOTA && in_egroup_p(id)))
			break;
		/*FALLTHROUGH*/
	default:
		/* everything else (and foreign ids) needs CAP_SYS_ADMIN */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}
static void quota_sync_one(struct super_block *sb, void *arg)
{
if (sb->s_qcop && sb->s_qcop->quota_sync)
sb->s_qcop->quota_sync(sb, *(int *)arg, 1);
}
/* Q_SYNC without a device: sync quota of @type on every superblock. */
static int quota_sync_all(int type)
{
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;

	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (ret)
		return ret;

	iterate_supers(quota_sync_one, &type);
	return 0;
}
/*
 * Turn quotas on.  Filesystems with metadata-stored quota files use
 * ->quota_on_meta and ignore @path; otherwise @path (already resolved
 * by the caller, possibly an ERR_PTR) names the quota file.
 */
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
		         struct path *path)
{
	const struct quotactl_ops *qcop = sb->s_qcop;

	if (!qcop->quota_on && !qcop->quota_on_meta)
		return -ENOSYS;
	if (qcop->quota_on_meta)
		return qcop->quota_on_meta(sb, type, id);
	if (IS_ERR(path))
		return PTR_ERR(path);
	return qcop->quota_on(sb, type, id, path);
}
/*
 * Q_GETFMT: copy the id of the quota format in use for @type to userspace.
 * The format id is read under dqptr_sem, together with the check that
 * quota is actually active for this type.
 */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!sb_has_quota_active(sb, type)) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
		return -ESRCH;
	}
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	up_read(&sb_dqopt(sb)->dqptr_sem);
	/* copy_to_user() only after the lock is dropped (it may fault) */
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}
/* Q_GETINFO: fetch quota info from the filesystem and copy it to userspace. */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo info;
	int err;

	if (!sb->s_qcop->get_info)
		return -ENOSYS;
	err = sb->s_qcop->get_info(sb, type, &info);
	if (err)
		return err;
	if (copy_to_user(addr, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
/* Q_SETINFO: copy new quota info from userspace and hand it to the fs. */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo qinfo;

	/* copy first: a bad pointer reports -EFAULT before -ENOSYS */
	if (copy_from_user(&qinfo, addr, sizeof(qinfo)))
		return -EFAULT;
	if (!sb->s_qcop->set_info)
		return -ENOSYS;
	return sb->s_qcop->set_info(sb, type, &qinfo);
}
/* Translate an fs_disk_quota (XFS-style) into the generic if_dqblk. */
static void copy_to_if_dqblk(struct if_dqblk *idq, struct fs_disk_quota *fdq)
{
	/* block limits and usage */
	idq->dqb_bhardlimit = fdq->d_blk_hardlimit;
	idq->dqb_bsoftlimit = fdq->d_blk_softlimit;
	idq->dqb_curspace = fdq->d_bcount;
	/* inode limits and usage */
	idq->dqb_ihardlimit = fdq->d_ino_hardlimit;
	idq->dqb_isoftlimit = fdq->d_ino_softlimit;
	idq->dqb_curinodes = fdq->d_icount;
	/* grace-period expiry times */
	idq->dqb_btime = fdq->d_btimer;
	idq->dqb_itime = fdq->d_itimer;
	/* all fields above are filled in */
	idq->dqb_valid = QIF_ALL;
}
/* Q_GETQUOTA: read one dquot from the fs and return it as an if_dqblk. */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct fs_disk_quota xfs_dq;
	struct if_dqblk vfs_dq;
	int err;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	err = sb->s_qcop->get_dqblk(sb, type, id, &xfs_dq);
	if (err)
		return err;
	/* The ->get_dqblk interface speaks fs_disk_quota; translate */
	copy_to_if_dqblk(&vfs_dq, &xfs_dq);
	if (copy_to_user(addr, &vfs_dq, sizeof(vfs_dq)))
		return -EFAULT;
	return 0;
}
/* Translate a generic if_dqblk into an fs_disk_quota (XFS-style). */
static void copy_from_if_dqblk(struct fs_disk_quota *fdq, struct if_dqblk *idq)
{
	/* map each QIF_* validity bit onto the FS_DQ_* field-mask bits */
	static const struct {
		unsigned int qif;
		unsigned int fs;
	} fieldmap[] = {
		{ QIF_BLIMITS, FS_DQ_BSOFT | FS_DQ_BHARD },
		{ QIF_SPACE, FS_DQ_BCOUNT },
		{ QIF_ILIMITS, FS_DQ_ISOFT | FS_DQ_IHARD },
		{ QIF_INODES, FS_DQ_ICOUNT },
		{ QIF_BTIME, FS_DQ_BTIMER },
		{ QIF_ITIME, FS_DQ_ITIMER },
	};
	int i;

	fdq->d_blk_hardlimit = idq->dqb_bhardlimit;
	fdq->d_blk_softlimit = idq->dqb_bsoftlimit;
	fdq->d_bcount = idq->dqb_curspace;
	fdq->d_ino_hardlimit = idq->dqb_ihardlimit;
	fdq->d_ino_softlimit = idq->dqb_isoftlimit;
	fdq->d_icount = idq->dqb_curinodes;
	fdq->d_btimer = idq->dqb_btime;
	fdq->d_itimer = idq->dqb_itime;

	fdq->d_fieldmask = 0;
	for (i = 0; i < ARRAY_SIZE(fieldmap); i++)
		if (idq->dqb_valid & fieldmap[i].qif)
			fdq->d_fieldmask |= fieldmap[i].fs;
}
/* Q_SETQUOTA: copy an if_dqblk from userspace and apply it to the fs. */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct if_dqblk vfs_dq;
	struct fs_disk_quota xfs_dq;

	if (copy_from_user(&vfs_dq, addr, sizeof(vfs_dq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	/* The ->set_dqblk interface speaks fs_disk_quota; translate */
	copy_from_if_dqblk(&xfs_dq, &vfs_dq);
	return sb->s_qcop->set_dqblk(sb, type, id, &xfs_dq);
}
/* Q_XQUOTAON/OFF/RM: pass the XFS quota state flags down to the fs. */
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
{
	__u32 new_flags;

	if (copy_from_user(&new_flags, addr, sizeof(new_flags)))
		return -EFAULT;
	if (!sb->s_qcop->set_xstate)
		return -ENOSYS;
	return sb->s_qcop->set_xstate(sb, new_flags, cmd);
}
/* Q_XGETQSTAT: fetch XFS quota statistics and copy them to userspace. */
static int quota_getxstate(struct super_block *sb, void __user *addr)
{
	struct fs_quota_stat qstat;
	int err;

	if (!sb->s_qcop->get_xstate)
		return -ENOSYS;
	err = sb->s_qcop->get_xstate(sb, &qstat);
	if (err)
		return err;
	if (copy_to_user(addr, &qstat, sizeof(qstat)))
		return -EFAULT;
	return 0;
}
/* Q_XSETQLIM: apply an fs_disk_quota from userspace, no translation needed. */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota xfs_dq;

	if (copy_from_user(&xfs_dq, addr, sizeof(xfs_dq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	return sb->s_qcop->set_dqblk(sb, type, id, &xfs_dq);
}
/* Q_XGETQUOTA: return a raw fs_disk_quota to userspace. */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota xfs_dq;
	int err;

	if (!sb->s_qcop->get_dqblk)
		return -ENOSYS;
	err = sb->s_qcop->get_dqblk(sb, type, id, &xfs_dq);
	if (err)
		return err;
	if (copy_to_user(addr, &xfs_dq, sizeof(xfs_dq)))
		return -EFAULT;
	return 0;
}
/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, struct path *path)
{
	int ret;

	/* XFS (XQM) commands allow more quota types than VFS commands */
	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
		return -EINVAL;
	if (!sb->s_qcop)
		return -ENOSYS;

	/* Permission/LSM checks come before any command dispatch */
	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, cmd, id, path);
	case Q_QUOTAOFF:
		if (!sb->s_qcop->quota_off)
			return -ENOSYS;
		return sb->s_qcop->quota_off(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type, 1);
	case Q_XQUOTAON:
	case Q_XQUOTAOFF:
	case Q_XQUOTARM:
		return quota_setxstate(sb, cmd, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb->s_flags & MS_RDONLY)
			return -EROFS;
		/* XFS quotas are fully coherent now, making this call a noop */
		return 0;
	default:
		return -EINVAL;
	}
}
/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
	switch (cmd) {
	/* pure read/query commands never dirty the filesystem */
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	default:
		return 1;
	}
}
/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 */
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	char *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp);
	putname(tmp);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	/* writing commands must wait for a frozen fs to thaw first */
	if (quotactl_cmd_write(cmd))
		sb = get_super_thawed(bdev);
	else
		sb = get_super(bdev);
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	/* no block layer: there is no device to look up */
	return ERR_PTR(-ENODEV);
#endif
}
/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 */
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	/* cmd packs the subcommand and the quota type into one word */
	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);	/* defer the error to quota_quotaon() */
		else
			pathp = &path;
	}

	sb = quotactl_block(special, cmds);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		goto out;
	}

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	drop_super(sb);
out:
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}
| gpl-2.0 |
VeryLettuce/LG_F120K_Kernel | arch/x86/platform/olpc/olpc-xo1-sci.c | 5349 | 14729 | /*
* Support for OLPC XO-1 System Control Interrupts (SCI)
*
* Copyright (C) 2010 One Laptop per Child
* Copyright (C) 2006 Red Hat, Inc.
* Copyright (C) 2006 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/cs5535.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/mfd/core.h>
#include <linux/power_supply.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/olpc.h>
#define DRV_NAME "olpc-xo1-sci"
#define PFX DRV_NAME ": "
/* I/O base of the CS5536 ACPI register block (from the platform resource) */
static unsigned long acpi_base;
static struct input_dev *power_button_idev;
static struct input_dev *ebook_switch_idev;
static struct input_dev *lid_switch_idev;
/* IRQ the SCI is routed to (read from / programmed into the PIC MSR) */
static int sci_irq;
/* Last lid state derived in detect_lid_state() */
static bool lid_open;
/* Whether the lid GPIO front-end inverter is currently enabled */
static bool lid_inverted;
/* One of enum lid_wake_modes; selects which lid transition may wake us */
static int lid_wake_mode;

enum lid_wake_modes {
	LID_WAKE_ALWAYS,
	LID_WAKE_OPEN,
	LID_WAKE_CLOSE,
};

/* sysfs names for the lid_wake_mode attribute */
static const char * const lid_wake_mode_names[] = {
	[LID_WAKE_ALWAYS] = "always",
	[LID_WAKE_OPEN] = "open",
	[LID_WAKE_CLOSE] = "close",
};
/* Notify the power-supply core that the battery state may have changed. */
static void battery_status_changed(void)
{
	struct power_supply *psy;

	psy = power_supply_get_by_name("olpc-battery");
	if (!psy)
		return;

	power_supply_changed(psy);
	put_device(psy->dev);
}
/* Notify the power-supply core that the AC adapter state may have changed. */
static void ac_status_changed(void)
{
	struct power_supply *psy;

	psy = power_supply_get_by_name("olpc-ac");
	if (!psy)
		return;

	power_supply_changed(psy);
	put_device(psy->dev);
}
/* Report current ebook switch state through input layer */
static void send_ebook_state(void)
{
	unsigned char state;

	/* Ask the embedded controller for the current ebook-mode state */
	if (olpc_ec_cmd(EC_READ_EB_MODE, NULL, 0, &state, 1)) {
		pr_err(PFX "failed to get ebook state\n");
		return;
	}

	input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
	input_sync(ebook_switch_idev);
}
/*
 * Toggle the front-end inverter on the lid GPIO and keep lid_inverted in
 * sync, so that the next lid transition appears as a positive edge (see
 * the edge-detector anomaly described in detect_lid_state()).
 */
static void flip_lid_inverter(void)
{
	/* gpio is high; invert so we'll get l->h event interrupt */
	if (lid_inverted)
		cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	else
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_INPUT_INVERT);

	lid_inverted = !lid_inverted;
}
/* Derive lid_open from the GPIO readback, compensating for the inverter. */
static void detect_lid_state(void)
{
	/*
	 * the edge detector hookup on the gpio inputs on the geode is
	 * odd, to say the least. See http://dev.laptop.org/ticket/5703
	 * for details, but in a nutshell: we don't use the edge
	 * detectors. instead, we make use of an anomoly: with the both
	 * edge detectors turned off, we still get an edge event on a
	 * positive edge transition. to take advantage of this, we use the
	 * front-end inverter to ensure that that's the edge we're always
	 * going to see next.
	 */

	int state;

	state = cs5535_gpio_isset(OLPC_GPIO_LID, GPIO_READ_BACK);
	/* undo the inverter's effect on the raw GPIO level */
	lid_open = !state ^ !lid_inverted; /* x ^^ y */
	if (!state)
		return;

	/* line is high: flip the inverter so the next transition is l->h */
	flip_lid_inverter();
}
/* Report current lid switch state through input layer */
static void send_lid_state(void)
{
	/* SW_LID semantics: 1 = lid closed, hence the negation */
	input_report_switch(lid_switch_idev, SW_LID, !lid_open);
	input_sync(lid_switch_idev);
}
/* sysfs read: print the current lid wake mode name. */
static ssize_t lid_wake_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", lid_wake_mode_names[lid_wake_mode]);
}
/* sysfs write: select the lid wake mode by (case-insensitive) name. */
static ssize_t lid_wake_mode_set(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lid_wake_mode_names); i++) {
		const char *mode = lid_wake_mode_names[i];

		/* exact-length, case-insensitive match against the input */
		if (strlen(mode) == count && !strncasecmp(mode, buf, count)) {
			lid_wake_mode = i;
			return count;
		}
	}

	return -EINVAL;
}
static DEVICE_ATTR(lid_wake_mode, S_IWUSR | S_IRUGO, lid_wake_mode_show,
		   lid_wake_mode_set);
/*
 * Process all items in the EC's SCI queue.
 *
 * This is handled in a workqueue because olpc_ec_cmd can be slow (and
 * can even timeout).
 *
 * If propagate_events is false, the queue is drained without events being
 * generated for the interrupts.
 */
static void process_sci_queue(bool propagate_events)
{
	int r;
	u16 data;

	do {
		r = olpc_ec_sci_query(&data);
		if (r || !data)
			break;

		pr_debug(PFX "SCI 0x%x received\n", data);

		switch (data) {
		case EC_SCI_SRC_BATERR:
		case EC_SCI_SRC_BATSOC:
		case EC_SCI_SRC_BATTERY:
		case EC_SCI_SRC_BATCRIT:
			battery_status_changed();
			break;
		case EC_SCI_SRC_ACPWR:
			ac_status_changed();
			break;
		}

		/* ebook events are suppressed when only draining the queue */
		if (data == EC_SCI_SRC_EBOOK && propagate_events)
			send_ebook_state();
	} while (data);

	/*
	 * FIX: the message previously lacked a trailing newline, so the
	 * next printk would be appended to the same log line.
	 */
	if (r)
		pr_err(PFX "Failed to clear SCI queue\n");
}
/* Workqueue entry point: drain the SCI queue with event propagation on. */
static void process_sci_queue_work(struct work_struct *work)
{
	process_sci_queue(true);
}

/* Work item scheduled from the SCI interrupt handler */
static DECLARE_WORK(sci_work, process_sci_queue_work);
/*
 * SCI interrupt handler: acknowledge PM and GPE status, then dispatch
 * power-button, EC and lid events.
 */
static irqreturn_t xo1_sci_intr(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	u32 sts;
	u32 gpe;

	/* Read and acknowledge the PM1 status bits (write-1-to-clear) */
	sts = inl(acpi_base + CS5536_PM1_STS);
	outl(sts | 0xffff, acpi_base + CS5536_PM1_STS);

	/* Read and acknowledge all GPE status bits */
	gpe = inl(acpi_base + CS5536_PM_GPE0_STS);
	outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);

	dev_dbg(&pdev->dev, "sts %x gpe %x\n", sts, gpe);

	/* Power button press (but not a wakeup event): emit a key tap */
	if (sts & CS5536_PWRBTN_FLAG && !(sts & CS5536_WAK_FLAG)) {
		input_report_key(power_button_idev, KEY_POWER, 1);
		input_sync(power_button_idev);
		input_report_key(power_button_idev, KEY_POWER, 0);
		input_sync(power_button_idev);
	}

	if (gpe & CS5536_GPIOM7_PME_FLAG) { /* EC GPIO */
		cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
		/* EC queue processing is slow; defer it to a workqueue */
		schedule_work(&sci_work);
	}

	/* Clear lid edge status and re-evaluate the lid unconditionally */
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
	detect_lid_state();
	send_lid_state();

	return IRQ_HANDLED;
}
/*
 * Program wakeup sources according to each input device's wakeup policy
 * before entering suspend.
 */
static int xo1_sci_suspend(struct platform_device *pdev, pm_message_t state)
{
	/* Power-button wake via the CS5536 PM power-button event */
	if (device_may_wakeup(&power_button_idev->dev))
		olpc_xo1_pm_wakeup_set(CS5536_PM_PWRBTN);
	else
		olpc_xo1_pm_wakeup_clear(CS5536_PM_PWRBTN);

	/* Ebook-switch wake via the EC's SCI wakeup mask */
	if (device_may_wakeup(&ebook_switch_idev->dev))
		olpc_ec_wakeup_set(EC_SCI_SRC_EBOOK);
	else
		olpc_ec_wakeup_clear(EC_SCI_SRC_EBOOK);

	if (!device_may_wakeup(&lid_switch_idev->dev)) {
		cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	} else if ((lid_open && lid_wake_mode == LID_WAKE_OPEN) ||
		   (!lid_open && lid_wake_mode == LID_WAKE_CLOSE)) {
		/*
		 * Only the "wrong" transition should wake us: flip the
		 * inverter so the desired transition shows as a positive edge.
		 */
		flip_lid_inverter();

		/* we may have just caused an event */
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	}

	return 0;
}
static int xo1_sci_resume(struct platform_device *pdev)
{
	/*
	 * We don't know what may have happened while we were asleep.
	 * Reestablish our lid setup so we're sure to catch all transitions.
	 */
	detect_lid_state();
	send_lid_state();
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);

	/* Enable all EC events */
	olpc_ec_mask_write(EC_SCI_SRC_ALL);

	/* Power/battery status might have changed too */
	battery_status_changed();
	ac_status_changed();
	return 0;
}
/*
 * Discover (or program) the IRQ the SCI is routed to, make it
 * level-triggered in the PIC, and install the handler.
 */
static int __devinit setup_sci_interrupt(struct platform_device *pdev)
{
	u32 lo, hi;
	u32 sts;
	int r;

	/* The SCI-to-IRQ mapping lives in bits 23:20 of this Geode MSR */
	rdmsr(0x51400020, lo, hi);
	sci_irq = (lo >> 20) & 15;

	if (sci_irq) {
		dev_info(&pdev->dev, "SCI is mapped to IRQ %d\n", sci_irq);
	} else {
		/* Zero means masked */
		dev_info(&pdev->dev, "SCI unmapped. Mapping to IRQ 3\n");
		sci_irq = 3;
		lo |= 0x00300000;
		/*
		 * NOTE(review): wrmsrl() writes a full 64-bit value, so the
		 * previously read 'hi' half is written back as zero here —
		 * confirm the upper 32 bits of this MSR are always zero.
		 */
		wrmsrl(0x51400020, lo);
	}

	/* Select level triggered in PIC */
	if (sci_irq < 8) {
		lo = inb(CS5536_PIC_INT_SEL1);
		lo |= 1 << sci_irq;
		outb(lo, CS5536_PIC_INT_SEL1);
	} else {
		lo = inb(CS5536_PIC_INT_SEL2);
		lo |= 1 << (sci_irq - 8);
		outb(lo, CS5536_PIC_INT_SEL2);
	}

	/* Enable SCI from power button, and clear pending interrupts */
	sts = inl(acpi_base + CS5536_PM1_STS);
	outl((CS5536_PM_PWRBTN << 16) | 0xffff, acpi_base + CS5536_PM1_STS);

	r = request_irq(sci_irq, xo1_sci_intr, 0, DRV_NAME, pdev);
	if (r)
		dev_err(&pdev->dev, "can't request interrupt\n");

	return r;
}
/* Claim the EC SCI GPIO and route its events to both a PME and the SCI IRQ. */
static int __devinit setup_ec_sci(void)
{
	int r;

	r = gpio_request(OLPC_GPIO_ECSCI, "OLPC-ECSCI");
	if (r)
		return r;

	gpio_direction_input(OLPC_GPIO_ECSCI);

	/* Clear pending EC SCI events */
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_POSITIVE_EDGE_STS);

	/*
	 * Enable EC SCI events, and map them to both a PME and the SCI
	 * interrupt.
	 *
	 * Ordinarily, in addition to functioning as GPIOs, Geode GPIOs can
	 * be mapped to regular interrupts *or* Geode-specific Power
	 * Management Events (PMEs) - events that bring the system out of
	 * suspend. In this case, we want both of those things - the system
	 * wakeup, *and* the ability to get an interrupt when an event occurs.
	 *
	 * To achieve this, we map the GPIO to a PME, and then we use one
	 * of the many generic knobs on the CS5535 PIC to additionally map the
	 * PME to the regular SCI interrupt line.
	 */
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_EVENTS_ENABLE);

	/* Set the SCI to cause a PME event on group 7 */
	cs5535_gpio_setup_event(OLPC_GPIO_ECSCI, 7, 1);

	/* And have group 7 also fire the SCI interrupt */
	cs5535_pic_unreqz_select_high(7, sci_irq);

	return 0;
}
/* Release the EC SCI GPIO claimed in setup_ec_sci(). */
static void free_ec_sci(void)
{
	gpio_free(OLPC_GPIO_ECSCI);
}
/* Claim the lid GPIO and wire it up as a PME-plus-SCI event source. */
static int __devinit setup_lid_events(void)
{
	int r;

	r = gpio_request(OLPC_GPIO_LID, "OLPC-LID");
	if (r)
		return r;

	gpio_direction_input(OLPC_GPIO_LID);

	/* Start with the front-end inverter off; track that in lid_inverted */
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	lid_inverted = 0;

	/* Clear edge detection and event enable for now */
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_EN);
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_EN);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);

	/* Set the LID to cause an PME event on group 6 */
	cs5535_gpio_setup_event(OLPC_GPIO_LID, 6, 1);

	/* Set PME group 6 to fire the SCI interrupt */
	cs5535_gpio_set_irq(6, sci_irq);

	/* Enable the event */
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);

	return 0;
}
/* Release the lid GPIO claimed in setup_lid_events(). */
static void free_lid_events(void)
{
	gpio_free(OLPC_GPIO_LID);
}
/* Allocate and register the power-button input device (wakeup-enabled). */
static int __devinit setup_power_button(struct platform_device *pdev)
{
	int r;

	power_button_idev = input_allocate_device();
	if (!power_button_idev)
		return -ENOMEM;

	power_button_idev->name = "Power Button";
	power_button_idev->phys = DRV_NAME "/input0";
	set_bit(EV_KEY, power_button_idev->evbit);
	set_bit(KEY_POWER, power_button_idev->keybit);

	power_button_idev->dev.parent = &pdev->dev;
	device_init_wakeup(&power_button_idev->dev, 1);

	r = input_register_device(power_button_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register power button: %d\n", r);
		/* freeing here is correct: registration failed */
		input_free_device(power_button_idev);
	}

	return r;
}
/*
 * Tear down the power-button input device.
 *
 * FIX: input_free_device() must only be called when registration never
 * happened or failed; after a successful input_register_device(),
 * input_unregister_device() drops the final reference itself, so the
 * extra free was a double-put / potential use-after-free.
 */
static void free_power_button(void)
{
	input_unregister_device(power_button_idev);
}
/* Allocate and register the ebook-mode switch input device. */
static int __devinit setup_ebook_switch(struct platform_device *pdev)
{
	int r;

	ebook_switch_idev = input_allocate_device();
	if (!ebook_switch_idev)
		return -ENOMEM;

	ebook_switch_idev->name = "EBook Switch";
	ebook_switch_idev->phys = DRV_NAME "/input1";
	set_bit(EV_SW, ebook_switch_idev->evbit);
	set_bit(SW_TABLET_MODE, ebook_switch_idev->swbit);

	ebook_switch_idev->dev.parent = &pdev->dev;
	device_set_wakeup_capable(&ebook_switch_idev->dev, true);

	r = input_register_device(ebook_switch_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register ebook switch: %d\n", r);
		/* freeing here is correct: registration failed */
		input_free_device(ebook_switch_idev);
	}

	return r;
}
/*
 * Tear down the ebook-switch input device.
 *
 * FIX: dropped the input_free_device() call after
 * input_unregister_device() — unregistering a successfully registered
 * device already releases it, so the extra free was a double-put.
 */
static void free_ebook_switch(void)
{
	input_unregister_device(ebook_switch_idev);
}
/* Allocate/register the lid-switch input device and its wake-mode attr. */
static int __devinit setup_lid_switch(struct platform_device *pdev)
{
	int r;

	lid_switch_idev = input_allocate_device();
	if (!lid_switch_idev)
		return -ENOMEM;

	lid_switch_idev->name = "Lid Switch";
	lid_switch_idev->phys = DRV_NAME "/input2";
	set_bit(EV_SW, lid_switch_idev->evbit);
	set_bit(SW_LID, lid_switch_idev->swbit);

	lid_switch_idev->dev.parent = &pdev->dev;
	device_set_wakeup_capable(&lid_switch_idev->dev, true);

	r = input_register_device(lid_switch_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register lid switch: %d\n", r);
		goto err_register;
	}

	/* Expose the lid_wake_mode policy knob on the input device */
	r = device_create_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode);
	if (r) {
		dev_err(&pdev->dev, "failed to create wake mode attr: %d\n", r);
		goto err_create_attr;
	}

	return 0;

err_create_attr:
	input_unregister_device(lid_switch_idev);
err_register:
	input_free_device(lid_switch_idev);
	return r;
}
/*
 * Tear down the lid-switch input device and its sysfs attribute.
 *
 * FIX: dropped the input_free_device() call after
 * input_unregister_device() — unregistering a successfully registered
 * device already releases it, so the extra free was a double-put.
 */
static void free_lid_switch(void)
{
	device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode);
	input_unregister_device(lid_switch_idev);
}
/*
 * Probe: set up input devices, GPIO/PME routing and the SCI interrupt.
 * Failures unwind in reverse order through the goto ladder.
 */
static int __devinit xo1_sci_probe(struct platform_device *pdev)
{
	struct resource *res;
	int r;

	/* don't run on non-XOs */
	if (!machine_is_olpc())
		return -ENODEV;

	r = mfd_cell_enable(pdev);
	if (r)
		return r;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res) {
		/*
		 * NOTE(review): mfd_cell_enable() is not undone on this
		 * early exit — confirm whether mfd_cell_disable() is
		 * needed here.
		 */
		dev_err(&pdev->dev, "can't fetch device resource info\n");
		return -EIO;
	}
	acpi_base = res->start;

	r = setup_power_button(pdev);
	if (r)
		return r;

	r = setup_ebook_switch(pdev);
	if (r)
		goto err_ebook;

	r = setup_lid_switch(pdev);
	if (r)
		goto err_lid;

	r = setup_lid_events();
	if (r)
		goto err_lidevt;

	r = setup_ec_sci();
	if (r)
		goto err_ecsci;

	/* Enable PME generation for EC-generated events */
	outl(CS5536_GPIOM6_PME_EN | CS5536_GPIOM7_PME_EN,
		acpi_base + CS5536_PM_GPE0_EN);

	/* Clear pending events */
	outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
	process_sci_queue(false);

	/* Initial sync */
	send_ebook_state();
	detect_lid_state();
	send_lid_state();

	r = setup_sci_interrupt(pdev);
	if (r)
		goto err_sci;

	/* Enable all EC events */
	olpc_ec_mask_write(EC_SCI_SRC_ALL);

	return r;

err_sci:
	free_ec_sci();
err_ecsci:
	free_lid_events();
err_lidevt:
	free_lid_switch();
err_lid:
	free_ebook_switch();
err_ebook:
	free_power_button();
	return r;
}
/* Remove: undo everything xo1_sci_probe() set up, in reverse order. */
static int __devexit xo1_sci_remove(struct platform_device *pdev)
{
	mfd_cell_disable(pdev);
	free_irq(sci_irq, pdev);
	/* the work may still be queued from a late interrupt; flush it */
	cancel_work_sync(&sci_work);
	free_ec_sci();
	free_lid_events();
	free_lid_switch();
	free_ebook_switch();
	free_power_button();
	acpi_base = 0;
	return 0;
}
/* Platform driver bound to the "olpc-xo1-sci-acpi" mfd cell */
static struct platform_driver xo1_sci_driver = {
	.driver = {
		.name = "olpc-xo1-sci-acpi",
	},
	.probe = xo1_sci_probe,
	.remove = __devexit_p(xo1_sci_remove),
	.suspend = xo1_sci_suspend,
	.resume = xo1_sci_resume,
};
static int __init xo1_sci_init(void)
{
return platform_driver_register(&xo1_sci_driver);
}
arch_initcall(xo1_sci_init);
| gpl-2.0 |
hanshuebner/linux-xlnx | arch/x86/platform/olpc/olpc-xo1-sci.c | 5349 | 14729 | /*
* Support for OLPC XO-1 System Control Interrupts (SCI)
*
* Copyright (C) 2010 One Laptop per Child
* Copyright (C) 2006 Red Hat, Inc.
* Copyright (C) 2006 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/cs5535.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/mfd/core.h>
#include <linux/power_supply.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/olpc.h>
#define DRV_NAME "olpc-xo1-sci"
#define PFX DRV_NAME ": "
static unsigned long acpi_base;
static struct input_dev *power_button_idev;
static struct input_dev *ebook_switch_idev;
static struct input_dev *lid_switch_idev;
static int sci_irq;
static bool lid_open;
static bool lid_inverted;
static int lid_wake_mode;
enum lid_wake_modes {
LID_WAKE_ALWAYS,
LID_WAKE_OPEN,
LID_WAKE_CLOSE,
};
static const char * const lid_wake_mode_names[] = {
[LID_WAKE_ALWAYS] = "always",
[LID_WAKE_OPEN] = "open",
[LID_WAKE_CLOSE] = "close",
};
/* Notify the power-supply core that battery state changed, if the
 * olpc-battery supply has registered. */
static void battery_status_changed(void)
{
	struct power_supply *psy = power_supply_get_by_name("olpc-battery");

	if (psy) {
		power_supply_changed(psy);
		/* presumably balances a reference taken by
		 * power_supply_get_by_name() - confirm against this
		 * kernel's power_supply API */
		put_device(psy->dev);
	}
}

/* Same as battery_status_changed(), for the olpc-ac supply. */
static void ac_status_changed(void)
{
	struct power_supply *psy = power_supply_get_by_name("olpc-ac");

	if (psy) {
		power_supply_changed(psy);
		put_device(psy->dev);
	}
}
/* Report current ebook switch state through input layer */
static void send_ebook_state(void)
{
	unsigned char state;

	/* ask the embedded controller for the current switch position */
	if (olpc_ec_cmd(EC_READ_EB_MODE, NULL, 0, &state, 1)) {
		pr_err(PFX "failed to get ebook state\n");
		return;
	}

	input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
	input_sync(ebook_switch_idev);
}

/* Toggle the front-end inverter on the lid GPIO and track it in
 * lid_inverted, so the next lid transition appears as a rising edge
 * (see the comment in detect_lid_state()). */
static void flip_lid_inverter(void)
{
	/* gpio is high; invert so we'll get l->h event interrupt */
	if (lid_inverted)
		cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	else
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	lid_inverted = !lid_inverted;
}
/* Sample the lid GPIO, derive lid_open (undoing the inverter), and if
 * the line currently reads high, flip the inverter so the next change
 * shows up as a positive edge. */
static void detect_lid_state(void)
{
	/*
	 * the edge detector hookup on the gpio inputs on the geode is
	 * odd, to say the least. See http://dev.laptop.org/ticket/5703
	 * for details, but in a nutshell: we don't use the edge
	 * detectors. instead, we make use of an anomoly: with the both
	 * edge detectors turned off, we still get an edge event on a
	 * positive edge transition. to take advantage of this, we use the
	 * front-end inverter to ensure that that's the edge we're always
	 * going to see next.
	 */
	int state;

	state = cs5535_gpio_isset(OLPC_GPIO_LID, GPIO_READ_BACK);
	lid_open = !state ^ !lid_inverted; /* x ^^ y */
	if (!state)
		return;

	flip_lid_inverter();
}

/* Report current lid switch state through input layer */
static void send_lid_state(void)
{
	/* SW_LID is asserted when the lid is closed, hence the negation */
	input_report_switch(lid_switch_idev, SW_LID, !lid_open);
	input_sync(lid_switch_idev);
}
/* sysfs: show the current lid wake policy by name. */
static ssize_t lid_wake_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	const char *mode = lid_wake_mode_names[lid_wake_mode];
	return sprintf(buf, "%s\n", mode);
}

/* sysfs: set the lid wake policy from "always"/"open"/"close"
 * (case-insensitive, exact length match).
 * NOTE(review): a trailing newline in buf makes the strlen()==count
 * check fail - confirm how userspace writes this attribute. */
static ssize_t lid_wake_mode_set(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(lid_wake_mode_names); i++) {
		const char *mode = lid_wake_mode_names[i];

		/* exact length so e.g. "op" does not match "open" */
		if (strlen(mode) != count || strncasecmp(mode, buf, count))
			continue;

		lid_wake_mode = i;
		return count;
	}
	return -EINVAL;
}
static DEVICE_ATTR(lid_wake_mode, S_IWUSR | S_IRUGO, lid_wake_mode_show,
		   lid_wake_mode_set);
/*
 * Process all items in the EC's SCI queue.
 *
 * This is handled in a workqueue because olpc_ec_cmd can be slow (and
 * can even timeout).
 *
 * If propagate_events is false, the queue is drained without events being
 * generated for the interrupts.
 */
static void process_sci_queue(bool propagate_events)
{
	int r;
	u16 data;

	do {
		r = olpc_ec_sci_query(&data);
		/* stop on error or when the queue reports empty (0) */
		if (r || !data)
			break;

		pr_debug(PFX "SCI 0x%x received\n", data);

		switch (data) {
		case EC_SCI_SRC_BATERR:
		case EC_SCI_SRC_BATSOC:
		case EC_SCI_SRC_BATTERY:
		case EC_SCI_SRC_BATCRIT:
			battery_status_changed();
			break;
		case EC_SCI_SRC_ACPWR:
			ac_status_changed();
			break;
		}

		/* ebook events reach the input layer only when requested */
		if (data == EC_SCI_SRC_EBOOK && propagate_events)
			send_ebook_state();
	} while (data);

	if (r)
		pr_err(PFX "Failed to clear SCI queue");
}

/* Workqueue wrapper: drain the queue, propagating events. */
static void process_sci_queue_work(struct work_struct *work)
{
	process_sci_queue(true);
}

static DECLARE_WORK(sci_work, process_sci_queue_work);
/*
 * SCI interrupt handler: acknowledge PM1/GPE0 status, synthesize a
 * power-button press+release, kick the EC work on EC GPIO events, and
 * refresh the lid state on every interrupt.
 */
static irqreturn_t xo1_sci_intr(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	u32 sts;
	u32 gpe;

	sts = inl(acpi_base + CS5536_PM1_STS);
	/* clear PM1 status (presumably write-1-to-clear, matching the
	 * GPE0 clear below) */
	outl(sts | 0xffff, acpi_base + CS5536_PM1_STS);

	gpe = inl(acpi_base + CS5536_PM_GPE0_STS);
	outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);

	dev_dbg(&pdev->dev, "sts %x gpe %x\n", sts, gpe);

	/* ignore the power-button bit when it was set by a wakeup */
	if (sts & CS5536_PWRBTN_FLAG && !(sts & CS5536_WAK_FLAG)) {
		input_report_key(power_button_idev, KEY_POWER, 1);
		input_sync(power_button_idev);
		input_report_key(power_button_idev, KEY_POWER, 0);
		input_sync(power_button_idev);
	}

	if (gpe & CS5536_GPIOM7_PME_FLAG) { /* EC GPIO */
		cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
		/* olpc_ec_cmd() can sleep; defer queue draining to a worker */
		schedule_work(&sci_work);
	}

	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
	detect_lid_state();
	send_lid_state();

	return IRQ_HANDLED;
}
/* Arm or disarm each wake source according to its device_may_wakeup()
 * setting before the system suspends. */
static int xo1_sci_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (device_may_wakeup(&power_button_idev->dev))
		olpc_xo1_pm_wakeup_set(CS5536_PM_PWRBTN);
	else
		olpc_xo1_pm_wakeup_clear(CS5536_PM_PWRBTN);

	if (device_may_wakeup(&ebook_switch_idev->dev))
		olpc_ec_wakeup_set(EC_SCI_SRC_EBOOK);
	else
		olpc_ec_wakeup_clear(EC_SCI_SRC_EBOOK);

	if (!device_may_wakeup(&lid_switch_idev->dev)) {
		cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	} else if ((lid_open && lid_wake_mode == LID_WAKE_OPEN) ||
		   (!lid_open && lid_wake_mode == LID_WAKE_CLOSE)) {
		/* arm the inverter for the transition we want to wake on */
		flip_lid_inverter();

		/* we may have just caused an event */
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
		cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	}

	return 0;
}
/* Re-sync all event sources after resume. */
static int xo1_sci_resume(struct platform_device *pdev)
{
	/*
	 * We don't know what may have happened while we were asleep.
	 * Reestablish our lid setup so we're sure to catch all transitions.
	 */
	detect_lid_state();
	send_lid_state();
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);

	/* Enable all EC events */
	olpc_ec_mask_write(EC_SCI_SRC_ALL);

	/* Power/battery status might have changed too */
	battery_status_changed();
	ac_status_changed();
	return 0;
}
/*
 * Discover (or assign) the SCI IRQ from the PIC mapping MSR, select
 * level triggering in the PIC, clear stale power-button status and
 * install the interrupt handler.
 */
static int __devinit setup_sci_interrupt(struct platform_device *pdev)
{
	u32 lo, hi;
	u32 sts;
	int r;

	rdmsr(0x51400020, lo, hi);
	sci_irq = (lo >> 20) & 15;

	if (sci_irq) {
		dev_info(&pdev->dev, "SCI is mapped to IRQ %d\n", sci_irq);
	} else {
		/* Zero means masked */
		dev_info(&pdev->dev, "SCI unmapped. Mapping to IRQ 3\n");
		sci_irq = 3;
		lo |= 0x00300000;
		/* NOTE(review): wrmsrl() writes all 64 bits from 'lo' only,
		 * discarding the 'hi' half read above - confirm the upper
		 * half of this MSR is zero/don't-care. */
		wrmsrl(0x51400020, lo);
	}

	/* Select level triggered in PIC */
	if (sci_irq < 8) {
		lo = inb(CS5536_PIC_INT_SEL1);
		lo |= 1 << sci_irq;
		outb(lo, CS5536_PIC_INT_SEL1);
	} else {
		lo = inb(CS5536_PIC_INT_SEL2);
		lo |= 1 << (sci_irq - 8);
		outb(lo, CS5536_PIC_INT_SEL2);
	}

	/* Enable SCI from power button, and clear pending interrupts */
	sts = inl(acpi_base + CS5536_PM1_STS);
	outl((CS5536_PM_PWRBTN << 16) | 0xffff, acpi_base + CS5536_PM1_STS);

	r = request_irq(sci_irq, xo1_sci_intr, 0, DRV_NAME, pdev);
	if (r)
		dev_err(&pdev->dev, "can't request interrupt\n");

	return r;
}
/* Claim the EC SCI GPIO, clear stale edge status and route its events
 * to both a PME (wakeup) and the SCI interrupt line. */
static int __devinit setup_ec_sci(void)
{
	int r;

	r = gpio_request(OLPC_GPIO_ECSCI, "OLPC-ECSCI");
	if (r)
		return r;

	gpio_direction_input(OLPC_GPIO_ECSCI);

	/* Clear pending EC SCI events */
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_POSITIVE_EDGE_STS);

	/*
	 * Enable EC SCI events, and map them to both a PME and the SCI
	 * interrupt.
	 *
	 * Ordinarily, in addition to functioning as GPIOs, Geode GPIOs can
	 * be mapped to regular interrupts *or* Geode-specific Power
	 * Management Events (PMEs) - events that bring the system out of
	 * suspend. In this case, we want both of those things - the system
	 * wakeup, *and* the ability to get an interrupt when an event occurs.
	 *
	 * To achieve this, we map the GPIO to a PME, and then we use one
	 * of the many generic knobs on the CS5535 PIC to additionally map the
	 * PME to the regular SCI interrupt line.
	 */
	cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_EVENTS_ENABLE);

	/* Set the SCI to cause a PME event on group 7 */
	cs5535_gpio_setup_event(OLPC_GPIO_ECSCI, 7, 1);

	/* And have group 7 also fire the SCI interrupt */
	cs5535_pic_unreqz_select_high(7, sci_irq);

	return 0;
}

/* Release the EC SCI GPIO claimed in setup_ec_sci(). */
static void free_ec_sci(void)
{
	gpio_free(OLPC_GPIO_ECSCI);
}
/* Claim the lid GPIO, reset the inverter/edge state, and route lid
 * events through PME group 6 to the SCI interrupt. */
static int __devinit setup_lid_events(void)
{
	int r;

	r = gpio_request(OLPC_GPIO_LID, "OLPC-LID");
	if (r)
		return r;

	gpio_direction_input(OLPC_GPIO_LID);

	/* start with the inverter off; lid_inverted tracks it from here */
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
	lid_inverted = 0;

	/* Clear edge detection and event enable for now */
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_EN);
	cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_EN);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);

	/* Set the LID to cause an PME event on group 6 */
	cs5535_gpio_setup_event(OLPC_GPIO_LID, 6, 1);

	/* Set PME group 6 to fire the SCI interrupt */
	cs5535_gpio_set_irq(6, sci_irq);

	/* Enable the event */
	cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);

	return 0;
}

/* Release the lid GPIO claimed in setup_lid_events(). */
static void free_lid_events(void)
{
	gpio_free(OLPC_GPIO_LID);
}
/* Register the power button input device; it is wakeup-enabled by
 * default.  Frees the device itself on registration failure. */
static int __devinit setup_power_button(struct platform_device *pdev)
{
	int r;

	power_button_idev = input_allocate_device();
	if (!power_button_idev)
		return -ENOMEM;

	power_button_idev->name = "Power Button";
	power_button_idev->phys = DRV_NAME "/input0";
	set_bit(EV_KEY, power_button_idev->evbit);
	set_bit(KEY_POWER, power_button_idev->keybit);

	power_button_idev->dev.parent = &pdev->dev;
	device_init_wakeup(&power_button_idev->dev, 1);

	r = input_register_device(power_button_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register power button: %d\n", r);
		input_free_device(power_button_idev);
	}

	return r;
}

/* Undo setup_power_button(). */
static void free_power_button(void)
{
	input_unregister_device(power_button_idev);
	input_free_device(power_button_idev);
}
/* Register the ebook (tablet-mode) switch input device; wakeup-capable
 * but not enabled by default. */
static int __devinit setup_ebook_switch(struct platform_device *pdev)
{
	int r;

	ebook_switch_idev = input_allocate_device();
	if (!ebook_switch_idev)
		return -ENOMEM;

	ebook_switch_idev->name = "EBook Switch";
	ebook_switch_idev->phys = DRV_NAME "/input1";
	set_bit(EV_SW, ebook_switch_idev->evbit);
	set_bit(SW_TABLET_MODE, ebook_switch_idev->swbit);

	ebook_switch_idev->dev.parent = &pdev->dev;
	device_set_wakeup_capable(&ebook_switch_idev->dev, true);

	r = input_register_device(ebook_switch_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register ebook switch: %d\n", r);
		input_free_device(ebook_switch_idev);
	}

	return r;
}

/* Undo setup_ebook_switch(). */
static void free_ebook_switch(void)
{
	input_unregister_device(ebook_switch_idev);
	input_free_device(ebook_switch_idev);
}
/* Register the lid switch input device and its lid_wake_mode sysfs
 * attribute; unwinds with gotos on failure. */
static int __devinit setup_lid_switch(struct platform_device *pdev)
{
	int r;

	lid_switch_idev = input_allocate_device();
	if (!lid_switch_idev)
		return -ENOMEM;

	lid_switch_idev->name = "Lid Switch";
	lid_switch_idev->phys = DRV_NAME "/input2";
	set_bit(EV_SW, lid_switch_idev->evbit);
	set_bit(SW_LID, lid_switch_idev->swbit);

	lid_switch_idev->dev.parent = &pdev->dev;
	device_set_wakeup_capable(&lid_switch_idev->dev, true);

	r = input_register_device(lid_switch_idev);
	if (r) {
		dev_err(&pdev->dev, "failed to register lid switch: %d\n", r);
		goto err_register;
	}

	r = device_create_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode);
	if (r) {
		dev_err(&pdev->dev, "failed to create wake mode attr: %d\n", r);
		goto err_create_attr;
	}

	return 0;

err_create_attr:
	input_unregister_device(lid_switch_idev);
err_register:
	input_free_device(lid_switch_idev);
	return r;
}

/* Undo setup_lid_switch(): attribute first, then the input device. */
static void free_lid_switch(void)
{
	device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode);
	input_unregister_device(lid_switch_idev);
	input_free_device(lid_switch_idev);
}
/*
 * Bind the SCI driver: enable the MFD cell, locate the ACPI I/O region,
 * register the three input devices, wire up the GPIO event sources and
 * finally the SCI interrupt.  The error labels unwind in exact reverse
 * order of the setup calls above them.
 */
static int __devinit xo1_sci_probe(struct platform_device *pdev)
{
	struct resource *res;
	int r;

	/* don't run on non-XOs */
	if (!machine_is_olpc())
		return -ENODEV;

	r = mfd_cell_enable(pdev);
	if (r)
		return r;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't fetch device resource info\n");
		return -EIO;
	}
	acpi_base = res->start;

	r = setup_power_button(pdev);
	if (r)
		return r;
	r = setup_ebook_switch(pdev);
	if (r)
		goto err_ebook;
	r = setup_lid_switch(pdev);
	if (r)
		goto err_lid;
	r = setup_lid_events();
	if (r)
		goto err_lidevt;
	r = setup_ec_sci();
	if (r)
		goto err_ecsci;

	/* Enable PME generation for EC-generated events */
	outl(CS5536_GPIOM6_PME_EN | CS5536_GPIOM7_PME_EN,
	     acpi_base + CS5536_PM_GPE0_EN);
	/* Clear pending events */
	outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
	/* drain any queued EC events without generating input events */
	process_sci_queue(false);

	/* Initial sync */
	send_ebook_state();
	detect_lid_state();
	send_lid_state();

	r = setup_sci_interrupt(pdev);
	if (r)
		goto err_sci;

	/* Enable all EC events */
	olpc_ec_mask_write(EC_SCI_SRC_ALL);

	return r;

err_sci:
	free_ec_sci();
err_ecsci:
	free_lid_events();
err_lidevt:
	free_lid_switch();
err_lid:
	free_ebook_switch();
err_ebook:
	free_power_button();
	return r;
}
/* Unbind: quiesce the IRQ and the worker first, then release resources
 * in reverse probe order. */
static int __devexit xo1_sci_remove(struct platform_device *pdev)
{
	mfd_cell_disable(pdev);
	free_irq(sci_irq, pdev);
	cancel_work_sync(&sci_work);
	free_ec_sci();
	free_lid_events();
	free_lid_switch();
	free_ebook_switch();
	free_power_button();
	acpi_base = 0;
	return 0;
}
/* Platform driver glue; probed by the "olpc-xo1-sci-acpi" MFD cell. */
static struct platform_driver xo1_sci_driver = {
	.driver = {
		.name = "olpc-xo1-sci-acpi",
	},
	.probe = xo1_sci_probe,
	.remove = __devexit_p(xo1_sci_remove),
	.suspend = xo1_sci_suspend,
	.resume = xo1_sci_resume,
};

/* Module entry point: register the platform driver. */
static int __init xo1_sci_init(void)
{
	return platform_driver_register(&xo1_sci_driver);
}
arch_initcall(xo1_sci_init);
| gpl-2.0 |
zeroblade1984/Galbi | net/wireless/ethtool.c | 8421 | 2109 | #include <linux/utsname.h>
#include <net/cfg80211.h>
#include "core.h"
#include "ethtool.h"
/*
 * Fill in ethtool driver information for a cfg80211-managed netdev:
 * the wiphy's driver name, the running kernel release as the version,
 * the wiphy-reported firmware version (or "N/A"), and the bus name of
 * the underlying device.
 */
static void cfg80211_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
		sizeof(info->driver));
	strlcpy(info->version, init_utsname()->release, sizeof(info->version));

	/*
	 * Use strlcpy, not strncpy: strncpy does not NUL-terminate when
	 * the source fills the destination, which would expose an
	 * unterminated fw_version string to userspace via ethtool.
	 */
	if (wdev->wiphy->fw_version[0])
		strlcpy(info->fw_version, wdev->wiphy->fw_version,
			sizeof(info->fw_version));
	else
		strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));

	strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)),
		sizeof(info->bus_info));
}
/* Register dumps are not exposed through cfg80211 yet. */
static int cfg80211_get_regs_len(struct net_device *dev)
{
	/* For now, return 0... */
	return 0;
}

/* Report the hardware version; no register contents are copied since
 * the declared length is zero. */
static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			      void *data)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;

	regs->version = wdev->wiphy->hw_version;
	regs->len = 0;
}
/* Report TX/RX ring sizes via the driver callback; all-zero when the
 * driver does not implement get_ringparam. */
static void cfg80211_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *rp)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);

	memset(rp, 0, sizeof(*rp));

	if (rdev->ops->get_ringparam)
		rdev->ops->get_ringparam(wdev->wiphy,
					 &rp->tx_pending, &rp->tx_max_pending,
					 &rp->rx_pending, &rp->rx_max_pending);
}

/* Set TX/RX ring sizes.  Mini/jumbo rings are not supported; drivers
 * without set_ringparam report -ENOTSUPP. */
static int cfg80211_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);

	if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
		return -EINVAL;

	if (rdev->ops->set_ringparam)
		return rdev->ops->set_ringparam(wdev->wiphy,
						rp->tx_pending, rp->rx_pending);

	return -ENOTSUPP;
}
/* Generic ethtool ops shared by all cfg80211 netdevs. */
const struct ethtool_ops cfg80211_ethtool_ops = {
	.get_drvinfo = cfg80211_get_drvinfo,
	.get_regs_len = cfg80211_get_regs_len,
	.get_regs = cfg80211_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cfg80211_get_ringparam,
	.set_ringparam = cfg80211_set_ringparam,
};
| gpl-2.0 |
ikotpk/android_kernel_samsung_vastoskt | net/netfilter/xt_mac.c | 8677 | 1791 | /* Kernel module to match MAC address parameters. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_mac.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: MAC address match");
MODULE_ALIAS("ipt_mac");
MODULE_ALIAS("ip6t_mac");
/* Match a packet against the configured source MAC address; the result
 * is XORed with the user's invert flag. */
static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_mac_info *info = par->matchinfo;
	bool ret;

	/* only Ethernet devices carry a comparable MAC header */
	if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER)
		return false;
	/* reject skbs whose full Ethernet header is not in the head room */
	if (skb_mac_header(skb) < skb->head)
		return false;
	if (skb_mac_header(skb) + ETH_HLEN > skb->data)
		return false;
	ret = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0;
	ret ^= info->invert;
	return ret;
}
/* Match registration: valid only on hooks where the MAC header is
 * still available (pre-routing, local-in, forward). */
static struct xt_match mac_mt_reg __read_mostly = {
	.name = "mac",
	.revision = 0,
	.family = NFPROTO_UNSPEC,
	.match = mac_mt,
	.matchsize = sizeof(struct xt_mac_info),
	.hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) |
		 (1 << NF_INET_FORWARD),
	.me = THIS_MODULE,
};

/* Module init: register the "mac" match with x_tables. */
static int __init mac_mt_init(void)
{
	return xt_register_match(&mac_mt_reg);
}

/* Module exit: unregister the match. */
static void __exit mac_mt_exit(void)
{
	xt_unregister_match(&mac_mt_reg);
}

module_init(mac_mt_init);
module_exit(mac_mt_exit);
| gpl-2.0 |
MIPS/qemu-android | tests/tcg/mips/mips32-dsp/dpaq_sa_l_w.c | 230 | 3107 | #include<stdio.h>
#include<assert.h>
/*
 * Test the MIPS DSP ASE instruction dpaq_sa.l.w (dot product with
 * accumulation, saturating) on accumulator ac1, checking both the
 * accumulator halves and bit 17 of DSPControl (the ac1 overflow flag)
 * against precomputed expected values.
 */
int main()
{
	int rs, rt, dsp;
	int ach = 0, acl = 0;
	int resulth, resultl, resultdsp;

	/* 0x80000000 * 0x80000000 saturates: expect 0x7FFFFFFFFFFFFFFF
	 * in ac1 and the overflow flag set */
	rs = 0x80000000;
	rt = 0x80000000;
	resulth = 0x7FFFFFFF;
	resultl = 0xFFFFFFFF;
	resultdsp = 0x01;
	__asm
	("mthi %0, $ac1\n\t"
	 "mtlo %1, $ac1\n\t"
	 "dpaq_sa.l.w $ac1, %3, %4\n\t"
	 "mfhi %0, $ac1\n\t"
	 "mflo %1, $ac1\n\t"
	 "rddsp %2\n\t"
	 : "+r"(ach), "+r"(acl), "=r"(dsp)
	 : "r"(rs), "r"(rt)
	);
	/* isolate DSPControl bit 17 (ouflag for ac1) */
	dsp = (dsp >> 17) & 0x01;
	assert(dsp == resultdsp);
	assert(ach == resulth);
	assert(acl == resultl);

	/* same saturating multiply with a small nonzero accumulator */
	ach = 0x00000012;
	acl = 0x00000048;
	rs = 0x80000000;
	rt = 0x80000000;
	resulth = 0x7FFFFFFF;
	resultl = 0xFFFFFFFF;
	resultdsp = 0x01;
	__asm
	("mthi %0, $ac1\n\t"
	 "mtlo %1, $ac1\n\t"
	 "dpaq_sa.l.w $ac1, %3, %4\n\t"
	 "mfhi %0, $ac1\n\t"
	 "mflo %1, $ac1\n\t"
	 "rddsp %2\n\t"
	 : "+r"(ach), "+r"(acl), "=r"(dsp)
	 : "r"(rs), "r"(rt)
	);
	dsp = (dsp >> 17) & 0x01;
	assert(dsp == resultdsp);
	assert(ach == resulth);
	assert(acl == resultl);

	/* saturating multiply with a large positive accumulator */
	ach = 0x741532A0;
	acl = 0xFCEABB08;
	rs = 0x80000000;
	rt = 0x80000000;
	resulth = 0x7FFFFFFF;
	resultl = 0xFFFFFFFF;
	resultdsp = 0x01;
	__asm
	("mthi %0, $ac1\n\t"
	 "mtlo %1, $ac1\n\t"
	 "dpaq_sa.l.w $ac1, %3, %4\n\t"
	 "mfhi %0, $ac1\n\t"
	 "mflo %1, $ac1\n\t"
	 "rddsp %2\n\t"
	 : "+r"(ach), "+r"(acl), "=r"(dsp)
	 : "r"(rs), "r"(rt)
	);
	dsp = (dsp >> 17) & 0x01;
	assert(dsp == resultdsp);
	assert(ach == resulth);
	assert(acl == resultl);

	/* non-saturating case: negative product, overflow flag stays
	 * clear (wrdsp $0 clears DSPControl first) */
	ach = 0;
	acl = 0;
	rs = 0xC0000000;
	rt = 0x7FFFFFFF;
	resulth = 0xC0000000;
	resultl = 0x80000000;
	resultdsp = 0;
	__asm
	("wrdsp $0\n\t"
	 "mthi %0, $ac1\n\t"
	 "mtlo %1, $ac1\n\t"
	 "dpaq_sa.l.w $ac1, %3, %4\n\t"
	 "mfhi %0, $ac1\n\t"
	 "mflo %1, $ac1\n\t"
	 "rddsp %2\n\t"
	 : "+r"(ach), "+r"(acl), "=r"(dsp)
	 : "r"(rs), "r"(rt)
	);
	dsp = (dsp >> 17) & 0x01;
	assert(dsp == resultdsp);
	assert(ach == resulth);
	assert(acl == resultl);

	/* non-saturating accumulate onto a positive accumulator */
	ach = 0x20000000;
	acl = 0;
	rs = 0xE0000000;
	rt = 0x7FFFFFFF;
	resulth = 0;
	resultl = 0x40000000;
	resultdsp = 0;
	__asm
	("wrdsp $0\n\t"
	 "mthi %0, $ac1\n\t"
	 "mtlo %1, $ac1\n\t"
	 "dpaq_sa.l.w $ac1, %3, %4\n\t"
	 "mfhi %0, $ac1\n\t"
	 "mflo %1, $ac1\n\t"
	 "rddsp %2\n\t"
	 : "+r"(ach), "+r"(acl), "=r"(dsp)
	 : "r"(rs), "r"(rt)
	);
	dsp = (dsp >> 17) & 0x01;
	assert(dsp == resultdsp);
	assert(ach == resulth);
	assert(acl == resultl);

	return 0;
}
| gpl-2.0 |
yangyang1989/linux-2.6.32.2-mini2440 | usr/gen_init_cpio.c | 486 | 12543 | #include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <limits.h>
/*
* Original work by Jeff Garzik
*
* External file lists, symlink, pipe and fifo support by Thayne Harbaugh
* Hard link support by Luciano Rocha
*/
#define xstr(s) #s
#define str(s) xstr(s)
static unsigned int offset;
static unsigned int ino = 721;
struct file_handler {
const char *type;
int (*handler)(const char *line);
};
/* Write NAME, including its terminating NUL, to the archive stream and
 * account for it in the running offset. */
static void push_string(const char *name)
{
	unsigned int len = strlen(name) + 1;

	fwrite(name, 1, len, stdout);
	offset += len;
}
/* Emit NUL bytes until the stream offset is 4-byte aligned. */
static void push_pad (void)
{
	for (; offset & 3; offset++)
		putchar(0);
}
/* Write NAME (with its NUL) following a 110-byte header and pad so that
 * header + name together end on a 4-byte boundary. */
static void push_rest(const char *name)
{
	unsigned int len = strlen(name) + 1;
	unsigned int end = len + 110;

	fwrite(name, 1, len, stdout);
	offset += len;

	/* pad (header + name) out to a multiple of 4 */
	for (; end & 3; end++, offset++)
		putchar(0);
}
/* Write a preformatted 110-byte newc cpio header and account for it. */
static void push_hdr(const char *s)
{
	fputs(s, stdout);
	offset += 110;
}
/* Emit the terminating TRAILER!!! record and pad the archive out to a
 * 512-byte boundary. */
static void cpio_trailer(void)
{
	char s[256];
	const char name[] = "TRAILER!!!";

	sprintf(s, "%s%08X%08X%08lX%08lX%08X%08lX"
	       "%08X%08X%08X%08X%08X%08X%08X",
		"070701",		/* magic */
		0,			/* ino */
		0,			/* mode */
		(long) 0,		/* uid */
		(long) 0,		/* gid */
		1,			/* nlink */
		(long) 0,		/* mtime */
		0,			/* filesize */
		0,			/* major */
		0,			/* minor */
		0,			/* rmajor */
		0,			/* rminor */
		(unsigned)strlen(name)+1, /* namesize */
		0);			/* chksum */
	push_hdr(s);
	push_rest(name);

	/* pad the whole archive to a 512-byte block */
	while (offset % 512) {
		putchar(0);
		offset++;
	}
}
/* Emit a symlink entry; the link target is stored as the file data,
 * padded like regular file contents. */
static int cpio_mkslink(const char *name, const char *target,
			 unsigned int mode, uid_t uid, gid_t gid)
{
	char s[256];
	time_t mtime = time(NULL);

	sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
	       "%08X%08X%08X%08X%08X%08X%08X",
		"070701",		/* magic */
		ino++,			/* ino */
		S_IFLNK | mode,		/* mode */
		(long) uid,		/* uid */
		(long) gid,		/* gid */
		1,			/* nlink */
		(long) mtime,		/* mtime */
		(unsigned)strlen(target)+1, /* filesize */
		3,			/* major */
		1,			/* minor */
		0,			/* rmajor */
		0,			/* rminor */
		(unsigned)strlen(name) + 1,/* namesize */
		0);			/* chksum */
	push_hdr(s);
	push_string(name);
	push_pad();
	push_string(target);
	push_pad();
	return 0;
}
/*
 * Parse a "slink <name> <target> <mode> <uid> <gid>" line and emit the
 * symlink entry.  Returns 0 on success, -1 on parse failure.
 */
static int cpio_mkslink_line(const char *line)
{
	char name[PATH_MAX + 1];
	char target[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	int rc = -1;

	if (5 != sscanf(line, "%" str(PATH_MAX) "s %" str(PATH_MAX) "s %o %d %d", name, target, &mode, &uid, &gid)) {
		/* the message previously said "dir" - copy/paste error */
		fprintf(stderr, "Unrecognized slink format '%s'", line);
		goto fail;
	}
	rc = cpio_mkslink(name, target, mode, uid, gid);
 fail:
	return rc;
}
/* Emit a data-less entry (directory, fifo or socket); the caller has
 * already OR'ed the S_IF* type bits into mode. */
static int cpio_mkgeneric(const char *name, unsigned int mode,
		       uid_t uid, gid_t gid)
{
	char s[256];
	time_t mtime = time(NULL);

	sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
	       "%08X%08X%08X%08X%08X%08X%08X",
		"070701",		/* magic */
		ino++,			/* ino */
		mode,			/* mode */
		(long) uid,		/* uid */
		(long) gid,		/* gid */
		2,			/* nlink */
		(long) mtime,		/* mtime */
		0,			/* filesize */
		3,			/* major */
		1,			/* minor */
		0,			/* rmajor */
		0,			/* rminor */
		(unsigned)strlen(name) + 1,/* namesize */
		0);			/* chksum */
	push_hdr(s);
	push_rest(name);
	return 0;
}
/* The three data-less entry kinds handled by cpio_mkgeneric_line(). */
enum generic_types {
	GT_DIR,
	GT_PIPE,
	GT_SOCK
};

/* Keyword plus the S_IF* mode bits to apply for that kind. */
struct generic_type {
	const char *type;
	mode_t mode;
};

static struct generic_type generic_type_table[] = {
	[GT_DIR] = {
		.type = "dir",
		.mode = S_IFDIR
	},
	[GT_PIPE] = {
		.type = "pipe",
		.mode = S_IFIFO
	},
	[GT_SOCK] = {
		.type = "sock",
		.mode = S_IFSOCK
	}
};
/*
 * Parse a "<name> <mode> <uid> <gid>" line for dir/pipe/sock entries
 * and emit the corresponding record.  Returns 0 on success, -1 on
 * parse failure.
 */
static int cpio_mkgeneric_line(const char *line, enum generic_types gt)
{
	char name[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	int rc = -1;

	if (4 != sscanf(line, "%" str(PATH_MAX) "s %o %d %d", name, &mode, &uid, &gid)) {
		/* arguments were swapped before: the entry type belongs
		 * in the first %s, the offending line in the second */
		fprintf(stderr, "Unrecognized %s format '%s'",
			generic_type_table[gt].type, line);
		goto fail;
	}
	mode |= generic_type_table[gt].mode;
	rc = cpio_mkgeneric(name, mode, uid, gid);
 fail:
	return rc;
}
/* "dir <name> <mode> <uid> <gid>" */
static int cpio_mkdir_line(const char *line)
{
	return cpio_mkgeneric_line(line, GT_DIR);
}

/* "pipe <name> <mode> <uid> <gid>" */
static int cpio_mkpipe_line(const char *line)
{
	return cpio_mkgeneric_line(line, GT_PIPE);
}

/* "sock <name> <mode> <uid> <gid>" */
static int cpio_mksock_line(const char *line)
{
	return cpio_mkgeneric_line(line, GT_SOCK);
}
/* Emit a device-node entry; dev_type selects block ('b') vs character
 * (anything else), maj/min go into rmajor/rminor. */
static int cpio_mknod(const char *name, unsigned int mode,
		       uid_t uid, gid_t gid, char dev_type,
		       unsigned int maj, unsigned int min)
{
	char s[256];
	time_t mtime = time(NULL);

	if (dev_type == 'b')
		mode |= S_IFBLK;
	else
		mode |= S_IFCHR;

	sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
	       "%08X%08X%08X%08X%08X%08X%08X",
		"070701",		/* magic */
		ino++,			/* ino */
		mode,			/* mode */
		(long) uid,		/* uid */
		(long) gid,		/* gid */
		1,			/* nlink */
		(long) mtime,		/* mtime */
		0,			/* filesize */
		3,			/* major */
		1,			/* minor */
		maj,			/* rmajor */
		min,			/* rminor */
		(unsigned)strlen(name) + 1,/* namesize */
		0);			/* chksum */
	push_hdr(s);
	push_rest(name);
	return 0;
}
/* Parse "nod <name> <mode> <uid> <gid> <dev_type> <maj> <min>" and emit
 * the device node.  Returns 0 on success, -1 on parse failure. */
static int cpio_mknod_line(const char *line)
{
	char name[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	char dev_type;
	unsigned int maj;
	unsigned int min;
	int rc = -1;

	if (7 != sscanf(line, "%" str(PATH_MAX) "s %o %d %d %c %u %u",
			 name, &mode, &uid, &gid, &dev_type, &maj, &min)) {
		fprintf(stderr, "Unrecognized nod format '%s'", line);
		goto fail;
	}
	rc = cpio_mknod(name, mode, uid, gid, dev_type, maj, min);
 fail:
	return rc;
}
/*
 * Emit one regular-file entry (plus hard links) into the cpio stream.
 * NAME is a sequence of NLINKS NUL-separated archive names; the data
 * read from LOCATION is attached to the last link only, per cpio
 * hard-link convention.  Returns 0 on success, -1 on error.
 */
static int cpio_mkfile(const char *name, const char *location,
			unsigned int mode, uid_t uid, gid_t gid,
			unsigned int nlinks)
{
	char s[256];
	char *filebuf = NULL;
	struct stat buf;
	long size;
	long nread;
	int file = -1;
	int retval;
	int rc = -1;
	int namesize;
	unsigned int i;

	mode |= S_IFREG;

	retval = stat (location, &buf);
	if (retval) {
		fprintf (stderr, "File %s could not be located\n", location);
		goto error;
	}

	file = open (location, O_RDONLY);
	if (file < 0) {
		fprintf (stderr, "File %s could not be opened for reading\n", location);
		goto error;
	}

	/* malloc(0) may legitimately return NULL; allocate at least one
	 * byte so empty files are not misreported as out-of-memory */
	filebuf = malloc(buf.st_size ? buf.st_size : 1);
	if (!filebuf) {
		fprintf (stderr, "out of memory\n");
		goto error;
	}

	/* read() may return short counts; loop until the whole file is
	 * buffered (the old single read silently truncated large files) */
	nread = 0;
	while (nread < buf.st_size) {
		retval = read(file, filebuf + nread, buf.st_size - nread);
		if (retval < 0) {
			fprintf (stderr, "Can not read %s file\n", location);
			goto error;
		}
		if (retval == 0)
			break;	/* file shrank; archive what we have */
		nread += retval;
	}

	size = 0;
	for (i = 1; i <= nlinks; i++) {
		/* data goes on last link */
		if (i == nlinks)
			size = nread;

		namesize = strlen(name) + 1;
		sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
		       "%08lX%08X%08X%08X%08X%08X%08X",
			"070701",		/* magic */
			ino,			/* ino */
			mode,			/* mode */
			(long) uid,		/* uid */
			(long) gid,		/* gid */
			nlinks,			/* nlink */
			(long) buf.st_mtime,	/* mtime */
			size,			/* filesize */
			3,			/* major */
			1,			/* minor */
			0,			/* rmajor */
			0,			/* rminor */
			namesize,		/* namesize */
			0);			/* chksum */
		push_hdr(s);
		push_string(name);
		push_pad();

		if (size) {
			fwrite(filebuf, size, 1, stdout);
			offset += size;
			push_pad();
		}
		name += namesize;
	}
	ino++;
	rc = 0;

error:
	free(filebuf);		/* free(NULL) is a no-op */
	if (file >= 0)
		close(file);
	return rc;
}
/*
 * Expand ${VAR} references in new_location in place and return it.
 * Unset variables expand to the empty string (previously getenv()'s
 * NULL was passed straight to strncat, crashing the tool).
 * new_location must be a writable buffer of at least PATH_MAX + 1
 * bytes.
 */
static char *cpio_replace_env(char *new_location)
{
	char expanded[PATH_MAX + 1];
	char env_var[PATH_MAX + 1];
	char *start;
	char *end;

	for (start = NULL; (start = strstr(new_location, "${")); ) {
		end = strchr(start, '}');
		if (start < end) {
			const char *val;

			*env_var = *expanded = '\0';
			strncat(env_var, start + 2, end - start - 2);
			strncat(expanded, new_location, start - new_location);
			/* getenv() returns NULL when the variable is unset;
			 * treat that as the empty string */
			val = getenv(env_var);
			if (val)
				strncat(expanded, val, PATH_MAX);
			strncat(expanded, end + 1, PATH_MAX);
			strncpy(new_location, expanded, PATH_MAX);
			/* strncpy does not terminate at exactly PATH_MAX */
			new_location[PATH_MAX] = '\0';
		} else
			break;
	}
	return new_location;
}
/* Parse "file <name> <location> <mode> <uid> <gid> [<hard links>]".
 * Extra names after gid are hard links; all names are packed into one
 * NUL-separated buffer (dname) handed to cpio_mkfile().  Returns 0 on
 * success, -1 on failure. */
static int cpio_mkfile_line(const char *line)
{
	char name[PATH_MAX + 1];
	char *dname = NULL; /* malloc'ed buffer for hard links */
	char location[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	int nlinks = 1;
	int end = 0, dname_len = 0;
	int rc = -1;

	/* %n records where parsing stopped so trailing link names can be
	 * scanned below */
	if (5 > sscanf(line, "%" str(PATH_MAX) "s %" str(PATH_MAX)
				"s %o %d %d %n",
				name, location, &mode, &uid, &gid, &end)) {
		fprintf(stderr, "Unrecognized file format '%s'", line);
		goto fail;
	}
	if (end && isgraph(line[end])) {
		int len;
		int nend;

		/* every token copied below originates from line, so
		 * strlen(line) bounds the packed-name buffer */
		dname = malloc(strlen(line));
		if (!dname) {
			fprintf (stderr, "out of memory (%d)\n", dname_len);
			goto fail;
		}

		dname_len = strlen(name) + 1;
		memcpy(dname, name, dname_len);

		do {
			nend = 0;
			if (sscanf(line + end, "%" str(PATH_MAX) "s %n",
					name, &nend) < 1)
				break;
			len = strlen(name) + 1;
			memcpy(dname + dname_len, name, len);
			dname_len += len;
			nlinks++;
			end += nend;
		} while (isgraph(line[end]));
	} else {
		dname = name;
	}
	rc = cpio_mkfile(dname, cpio_replace_env(location),
	                 mode, uid, gid, nlinks);
 fail:
	/* dname_len is nonzero only when dname was heap-allocated */
	if (dname_len) free(dname);
	return rc;
}
/* Print command-line usage and the cpio_list file-format reference. */
static void usage(const char *prog)
{
	fprintf(stderr, "Usage:\n"
		"\t%s <cpio_list>\n"
		"\n"
		"<cpio_list> is a file containing newline separated entries that\n"
		"describe the files to be included in the initramfs archive:\n"
		"\n"
		"# a comment\n"
		"file <name> <location> <mode> <uid> <gid> [<hard links>]\n"
		"dir <name> <mode> <uid> <gid>\n"
		"nod <name> <mode> <uid> <gid> <dev_type> <maj> <min>\n"
		"slink <name> <target> <mode> <uid> <gid>\n"
		"pipe <name> <mode> <uid> <gid>\n"
		"sock <name> <mode> <uid> <gid>\n"
		"\n"
		"<name>       name of the file/dir/nod/etc in the archive\n"
		"<location>   location of the file in the current filesystem\n"
		"             expands shell variables quoted with ${}\n"
		"<target>     link target\n"
		"<mode>       mode/permissions of the file\n"
		"<uid>        user id (0=root)\n"
		"<gid>        group id (0=root)\n"
		"<dev_type>   device type (b=block, c=character)\n"
		"<maj>        major number of nod\n"
		"<min>        minor number of nod\n"
		"<hard links> space separated list of other links to file\n"
		"\n"
		"example:\n"
		"# A simple initramfs\n"
		"dir /dev 0755 0 0\n"
		"nod /dev/console 0600 0 0 c 5 1\n"
		"dir /root 0700 0 0\n"
		"dir /sbin 0755 0 0\n"
		"file /sbin/kinit /usr/src/klibc/kinit/kinit 0755 0 0\n",
		prog);
}
/* Dispatch table: keyword -> line parser.  Terminated by a NULL type. */
struct file_handler file_handler_table[] = {
	{
		.type    = "file",
		.handler = cpio_mkfile_line,
	}, {
		.type    = "nod",
		.handler = cpio_mknod_line,
	}, {
		.type    = "dir",
		.handler = cpio_mkdir_line,
	}, {
		.type    = "slink",
		.handler = cpio_mkslink_line,
	}, {
		.type    = "pipe",
		.handler = cpio_mkpipe_line,
	}, {
		.type    = "sock",
		.handler = cpio_mksock_line,
	}, {
		.type    = NULL,
		.handler = NULL,
	}
};
#define LINE_SIZE (2 * PATH_MAX + 50)

/*
 * Read a cpio_list (a file, or stdin for "-"), dispatch each line to
 * its type handler, and finish the archive with a trailer when no
 * error occurred.  Exits nonzero on error.
 */
int main (int argc, char *argv[])
{
	FILE *cpio_list;
	char line[LINE_SIZE];
	char *args, *type;
	int ec = 0;
	int line_nr = 0;

	if (2 != argc) {
		usage(argv[0]);
		exit(1);
	}

	if (!strcmp(argv[1], "-"))
		cpio_list = stdin;
	else if (! (cpio_list = fopen(argv[1], "r"))) {
		fprintf(stderr, "ERROR: unable to open '%s': %s\n\n",
			argv[1], strerror(errno));
		usage(argv[0]);
		exit(1);
	}

	while (fgets(line, LINE_SIZE, cpio_list)) {
		int type_idx;
		size_t slen = strlen(line);

		line_nr++;

		if ('#' == *line) {
			/* comment - skip to next line */
			continue;
		}

		if (! (type = strtok(line, " \t"))) {
			fprintf(stderr,
				"ERROR: incorrect format, could not locate file type line %d: '%s'\n",
				line_nr, line);
			ec = -1;
			break;
		}

		if ('\n' == *type) {
			/* a blank line */
			continue;
		}

		if (slen == strlen(type)) {
			/* must be an empty line */
			continue;
		}

		if (! (args = strtok(NULL, "\n"))) {
			fprintf(stderr,
				"ERROR: incorrect format, newline required line %d: '%s'\n",
				line_nr, line);
			ec = -1;
			/* stop here: without this break a NULL args pointer
			 * reached the handler below and was fed to sscanf */
			break;
		}

		for (type_idx = 0; file_handler_table[type_idx].type; type_idx++) {
			int rc;
			if (! strcmp(line, file_handler_table[type_idx].type)) {
				if ((rc = file_handler_table[type_idx].handler(args))) {
					ec = rc;
					fprintf(stderr, " line %d\n", line_nr);
				}
				break;
			}
		}

		if (NULL == file_handler_table[type_idx].type) {
			fprintf(stderr, "unknown file type line %d: '%s'\n",
				line_nr, line);
		}
	}

	if (ec == 0)
		cpio_trailer();

	exit(ec);
}
| gpl-2.0 |
arshull/GalaTab3_KK_Kernel_T310 | arch/arm/mach-imx/mach-armadillo5x0.c | 2278 | 14764 | /*
* armadillo5x0.c
*
* Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
* updates in http://alberdroid.blogspot.com/
*
* Based on Atmark Techno, Inc. armadillo 500 BSP 2008
* Based on mx31ads.c and pcm037.c Great Work!
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/smsc911x.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/io.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <linux/delay.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/memory.h>
#include <asm/mach/map.h>
#include <mach/common.h>
#include <mach/iomux-mx3.h>
#include <mach/ulpi.h>
#include "devices-imx31.h"
#include "crmregs-imx31.h"
static int armadillo5x0_pins[] = {
/* UART1 */
MX31_PIN_CTS1__CTS1,
MX31_PIN_RTS1__RTS1,
MX31_PIN_TXD1__TXD1,
MX31_PIN_RXD1__RXD1,
/* UART2 */
MX31_PIN_CTS2__CTS2,
MX31_PIN_RTS2__RTS2,
MX31_PIN_TXD2__TXD2,
MX31_PIN_RXD2__RXD2,
/* LAN9118_IRQ */
IOMUX_MODE(MX31_PIN_GPIO1_0, IOMUX_CONFIG_GPIO),
/* SDHC1 */
MX31_PIN_SD1_DATA3__SD1_DATA3,
MX31_PIN_SD1_DATA2__SD1_DATA2,
MX31_PIN_SD1_DATA1__SD1_DATA1,
MX31_PIN_SD1_DATA0__SD1_DATA0,
MX31_PIN_SD1_CLK__SD1_CLK,
MX31_PIN_SD1_CMD__SD1_CMD,
/* Framebuffer */
MX31_PIN_LD0__LD0,
MX31_PIN_LD1__LD1,
MX31_PIN_LD2__LD2,
MX31_PIN_LD3__LD3,
MX31_PIN_LD4__LD4,
MX31_PIN_LD5__LD5,
MX31_PIN_LD6__LD6,
MX31_PIN_LD7__LD7,
MX31_PIN_LD8__LD8,
MX31_PIN_LD9__LD9,
MX31_PIN_LD10__LD10,
MX31_PIN_LD11__LD11,
MX31_PIN_LD12__LD12,
MX31_PIN_LD13__LD13,
MX31_PIN_LD14__LD14,
MX31_PIN_LD15__LD15,
MX31_PIN_LD16__LD16,
MX31_PIN_LD17__LD17,
MX31_PIN_VSYNC3__VSYNC3,
MX31_PIN_HSYNC__HSYNC,
MX31_PIN_FPSHIFT__FPSHIFT,
MX31_PIN_DRDY0__DRDY0,
IOMUX_MODE(MX31_PIN_LCS1, IOMUX_CONFIG_GPIO), /*ADV7125_PSAVE*/
/* I2C2 */
MX31_PIN_CSPI2_MOSI__SCL,
MX31_PIN_CSPI2_MISO__SDA,
/* OTG */
MX31_PIN_USBOTG_DATA0__USBOTG_DATA0,
MX31_PIN_USBOTG_DATA1__USBOTG_DATA1,
MX31_PIN_USBOTG_DATA2__USBOTG_DATA2,
MX31_PIN_USBOTG_DATA3__USBOTG_DATA3,
MX31_PIN_USBOTG_DATA4__USBOTG_DATA4,
MX31_PIN_USBOTG_DATA5__USBOTG_DATA5,
MX31_PIN_USBOTG_DATA6__USBOTG_DATA6,
MX31_PIN_USBOTG_DATA7__USBOTG_DATA7,
MX31_PIN_USBOTG_CLK__USBOTG_CLK,
MX31_PIN_USBOTG_DIR__USBOTG_DIR,
MX31_PIN_USBOTG_NXT__USBOTG_NXT,
MX31_PIN_USBOTG_STP__USBOTG_STP,
/* USB host 2 */
IOMUX_MODE(MX31_PIN_USBH2_CLK, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_USBH2_DIR, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_USBH2_NXT, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_USBH2_STP, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_USBH2_DATA0, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_USBH2_DATA1, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_STXD3, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_SRXD3, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_SCK3, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_SFS3, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_STXD6, IOMUX_CONFIG_FUNC),
IOMUX_MODE(MX31_PIN_SRXD6, IOMUX_CONFIG_FUNC),
};
/* USB */
#define OTG_RESET IOMUX_TO_GPIO(MX31_PIN_STXD4)
#define USBH2_RESET IOMUX_TO_GPIO(MX31_PIN_SCK6)
#define USBH2_CS IOMUX_TO_GPIO(MX31_PIN_GPIO1_3)
#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
/*
 * usbotg_init - board-specific bring-up of the USB OTG controller.
 *
 * Applies the USB pad configuration to every ULPI signal, pulses the
 * external OTG PHY reset line (active low), then hands the port over
 * to the i.MX31 EHCI core.
 *
 * Returns 0 on success or a negative errno value.
 */
static int usbotg_init(struct platform_device *pdev)
{
	static const unsigned int otg_pins[] = {
		MX31_PIN_USBOTG_DATA0, MX31_PIN_USBOTG_DATA1,
		MX31_PIN_USBOTG_DATA2, MX31_PIN_USBOTG_DATA3,
		MX31_PIN_USBOTG_DATA4, MX31_PIN_USBOTG_DATA5,
		MX31_PIN_USBOTG_DATA6, MX31_PIN_USBOTG_DATA7,
		MX31_PIN_USBOTG_CLK, MX31_PIN_USBOTG_DIR,
		MX31_PIN_USBOTG_NXT, MX31_PIN_USBOTG_STP,
	};
	unsigned int i;
	int ret;

	/* Same pad setting for every ULPI pin of the OTG port. */
	for (i = 0; i < ARRAY_SIZE(otg_pins); i++)
		mxc_iomux_set_pad(otg_pins[i], USB_PAD_CFG);

	/* Chip already enabled by hardware */

	/* OTG phy reset */
	ret = gpio_request(OTG_RESET, "USB-OTG-RESET");
	if (ret) {
		pr_err("Failed to request the usb otg reset gpio\n");
		return ret;
	}

	ret = gpio_direction_output(OTG_RESET, 1/*HIGH*/);
	if (ret) {
		pr_err("Failed to reset the usb otg phy\n");
		goto otg_free_reset;
	}

	/* Pulse the reset line low, then release it again. */
	gpio_set_value(OTG_RESET, 0/*LOW*/);
	mdelay(5);
	gpio_set_value(OTG_RESET, 1/*HIGH*/);
	mdelay(10);

	return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
			MXC_EHCI_INTERFACE_DIFF_UNI);

otg_free_reset:
	gpio_free(OTG_RESET);
	return ret;
}
/*
 * usbh2_init - board-specific bring-up of USB host port 2.
 *
 * Configures the ULPI pad settings, routes the UH2 signals via the
 * GPR mux, enables the PHY chip-select, pulses the PHY reset line
 * and finally initializes the i.MX31 EHCI host controller.
 *
 * Returns 0 on success or a negative errno value; on failure all
 * GPIOs claimed so far are released again.
 */
static int usbh2_init(struct platform_device *pdev)
{
	static const unsigned int h2_pins[] = {
		MX31_PIN_USBH2_CLK, MX31_PIN_USBH2_DIR,
		MX31_PIN_USBH2_NXT, MX31_PIN_USBH2_STP,
		MX31_PIN_USBH2_DATA0, MX31_PIN_USBH2_DATA1,
		MX31_PIN_SRXD6, MX31_PIN_STXD6,
		MX31_PIN_SFS3, MX31_PIN_SCK3,
		MX31_PIN_SRXD3, MX31_PIN_STXD3,
	};
	unsigned int i;
	int ret;

	/* Same pad setting for every ULPI pin of host port 2. */
	for (i = 0; i < ARRAY_SIZE(h2_pins); i++)
		mxc_iomux_set_pad(h2_pins[i], USB_PAD_CFG);

	/* Route the UH2 signals onto the pins configured above. */
	mxc_iomux_set_gpr(MUX_PGP_UH2, true);

	/* Enable the chip (chip select is active low). */
	ret = gpio_request(USBH2_CS, "USB-H2-CS");
	if (ret) {
		pr_err("Failed to request the usb host 2 CS gpio\n");
		return ret;
	}
	ret = gpio_direction_output(USBH2_CS, 0/*Enabled*/);
	if (ret) {
		pr_err("Failed to drive the usb host 2 CS gpio\n");
		goto h2_free_cs;
	}

	/* H2 phy reset */
	ret = gpio_request(USBH2_RESET, "USB-H2-RESET");
	if (ret) {
		pr_err("Failed to request the usb host 2 reset gpio\n");
		goto h2_free_cs;
	}
	ret = gpio_direction_output(USBH2_RESET, 1/*HIGH*/);
	if (ret) {
		pr_err("Failed to reset the usb host 2 phy\n");
		goto h2_free_reset;
	}

	/* Pulse the reset line low, then release it again. */
	gpio_set_value(USBH2_RESET, 0/*LOW*/);
	mdelay(5);
	gpio_set_value(USBH2_RESET, 1/*HIGH*/);
	mdelay(10);

	return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
			MXC_EHCI_INTERFACE_DIFF_UNI);

h2_free_reset:
	gpio_free(USBH2_RESET);
h2_free_cs:
	gpio_free(USBH2_CS);
	return ret;
}
static struct mxc_usbh_platform_data usbotg_pdata __initdata = {
.init = usbotg_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
};
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.init = usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
};
/* RTC over I2C*/
#define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4)
static struct i2c_board_info armadillo5x0_i2c_rtc = {
I2C_BOARD_INFO("s35390a", 0x30),
};
/* GPIO BUTTONS */
static struct gpio_keys_button armadillo5x0_buttons[] = {
{
.code = KEY_ENTER, /*28*/
.gpio = IOMUX_TO_GPIO(MX31_PIN_SCLK0),
.active_low = 1,
.desc = "menu",
.wakeup = 1,
}, {
.code = KEY_BACK, /*158*/
.gpio = IOMUX_TO_GPIO(MX31_PIN_SRST0),
.active_low = 1,
.desc = "back",
.wakeup = 1,
}
};
static const struct gpio_keys_platform_data
armadillo5x0_button_data __initconst = {
.buttons = armadillo5x0_buttons,
.nbuttons = ARRAY_SIZE(armadillo5x0_buttons),
};
/*
* NAND Flash
*/
static const struct mxc_nand_platform_data
armadillo5x0_nand_board_info __initconst = {
.width = 1,
.hw_ecc = 1,
};
/*
* MTD NOR Flash
*/
static struct mtd_partition armadillo5x0_nor_flash_partitions[] = {
{
.name = "nor.bootloader",
.offset = 0x00000000,
.size = 4*32*1024,
}, {
.name = "nor.kernel",
.offset = MTDPART_OFS_APPEND,
.size = 16*128*1024,
}, {
.name = "nor.userland",
.offset = MTDPART_OFS_APPEND,
.size = 110*128*1024,
}, {
.name = "nor.config",
.offset = MTDPART_OFS_APPEND,
.size = 1*128*1024,
},
};
static struct physmap_flash_data armadillo5x0_nor_flash_pdata = {
.width = 2,
.parts = armadillo5x0_nor_flash_partitions,
.nr_parts = ARRAY_SIZE(armadillo5x0_nor_flash_partitions),
};
static struct resource armadillo5x0_nor_flash_resource = {
.flags = IORESOURCE_MEM,
.start = MX31_CS0_BASE_ADDR,
.end = MX31_CS0_BASE_ADDR + SZ_64M - 1,
};
static struct platform_device armadillo5x0_nor_flash = {
.name = "physmap-flash",
.id = -1,
.num_resources = 1,
.resource = &armadillo5x0_nor_flash_resource,
};
/*
* FB support
*/
static const struct fb_videomode fb_modedb[] = {
{ /* 640x480 @ 60 Hz */
.name = "CRT-VGA",
.refresh = 60,
.xres = 640,
.yres = 480,
.pixclock = 39721,
.left_margin = 35,
.right_margin = 115,
.upper_margin = 43,
.lower_margin = 1,
.hsync_len = 10,
.vsync_len = 1,
.sync = FB_SYNC_OE_ACT_HIGH,
.vmode = FB_VMODE_NONINTERLACED,
.flag = 0,
}, {/* 800x600 @ 56 Hz */
.name = "CRT-SVGA",
.refresh = 56,
.xres = 800,
.yres = 600,
.pixclock = 30000,
.left_margin = 30,
.right_margin = 108,
.upper_margin = 13,
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 1,
.sync = FB_SYNC_OE_ACT_HIGH | FB_SYNC_HOR_HIGH_ACT |
FB_SYNC_VERT_HIGH_ACT,
.vmode = FB_VMODE_NONINTERLACED,
.flag = 0,
},
};
static const struct ipu_platform_data mx3_ipu_data __initconst = {
.irq_base = MXC_IPU_IRQ_START,
};
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.name = "CRT-VGA",
.mode = fb_modedb,
.num_modes = ARRAY_SIZE(fb_modedb),
};
/*
* SDHC 1
* MMC support
*/
/* Report the state of the SD card write-protect switch (non-zero = RO). */
static int armadillo5x0_sdhc1_get_ro(struct device *dev)
{
	int wp_gpio = IOMUX_TO_GPIO(MX31_PIN_ATA_RESET_B);

	return gpio_get_value(wp_gpio);
}
/*
 * armadillo5x0_sdhc1_init - claim the SDHC1 card-detect and
 * write-protect GPIOs and install the card-detect interrupt handler.
 *
 * @dev:        SDHC device (unused)
 * @detect_irq: handler invoked on card insertion/removal
 * @data:       cookie passed back to @detect_irq
 *
 * Returns 0 on success or a negative errno; on any failure every
 * resource acquired so far is released again.
 */
static int armadillo5x0_sdhc1_init(struct device *dev,
				irq_handler_t detect_irq, void *data)
{
	int ret;
	int gpio_det, gpio_wp;

	gpio_det = IOMUX_TO_GPIO(MX31_PIN_ATA_DMACK);
	gpio_wp = IOMUX_TO_GPIO(MX31_PIN_ATA_RESET_B);

	ret = gpio_request(gpio_det, "sdhc-card-detect");
	if (ret)
		return ret;

	/* Fix: the original silently ignored gpio_direction_input() errors. */
	ret = gpio_direction_input(gpio_det);
	if (ret)
		goto err_gpio_free;

	ret = gpio_request(gpio_wp, "sdhc-write-protect");
	if (ret)
		goto err_gpio_free;

	ret = gpio_direction_input(gpio_wp);
	if (ret)
		goto err_gpio_free_2;

	/* When supported the trigger type have to be BOTH */
	ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_ATA_DMACK), detect_irq,
			  IRQF_DISABLED | IRQF_TRIGGER_FALLING,
			  "sdhc-detect", data);
	if (ret)
		goto err_gpio_free_2;

	return 0;

err_gpio_free_2:
	gpio_free(gpio_wp);

err_gpio_free:
	gpio_free(gpio_det);

	return ret;
}
/* Undo armadillo5x0_sdhc1_init(): release the IRQ and both GPIOs. */
static void armadillo5x0_sdhc1_exit(struct device *dev, void *data)
{
	int gpio_det = IOMUX_TO_GPIO(MX31_PIN_ATA_DMACK);
	int gpio_wp = IOMUX_TO_GPIO(MX31_PIN_ATA_RESET_B);

	free_irq(IOMUX_TO_IRQ(MX31_PIN_ATA_DMACK), data);
	gpio_free(gpio_det);
	gpio_free(gpio_wp);
}
static const struct imxmmc_platform_data sdhc_pdata __initconst = {
.get_ro = armadillo5x0_sdhc1_get_ro,
.init = armadillo5x0_sdhc1_init,
.exit = armadillo5x0_sdhc1_exit,
};
/*
* SMSC 9118
* Network support
*/
static struct resource armadillo5x0_smc911x_resources[] = {
{
.start = MX31_CS3_BASE_ADDR,
.end = MX31_CS3_BASE_ADDR + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
.start = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
.end = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
static struct smsc911x_platform_config smsc911x_info = {
.flags = SMSC911X_USE_16BIT,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
};
static struct platform_device armadillo5x0_smc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(armadillo5x0_smc911x_resources),
.resource = armadillo5x0_smc911x_resources,
.dev = {
.platform_data = &smsc911x_info,
},
};
/* UART device data */
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
static struct platform_device *devices[] __initdata = {
&armadillo5x0_smc911x_device,
};
/*
* Perform board specific initializations
*/
/*
 * armadillo5x0_init - perform board specific initializations.
 *
 * Registers, in order: pin mux, platform devices (ethernet), GPIO
 * keys, I2C, UARTs, SDHC, framebuffer, NOR and NAND flash, the I2C
 * RTC and finally both USB controllers.  Order is hardware-relevant;
 * do not reorder casually.
 */
static void __init armadillo5x0_init(void)
{
	mxc_iomux_setup_multiple_pins(armadillo5x0_pins,
			ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0");
	platform_add_devices(devices, ARRAY_SIZE(devices));
	imx_add_gpio_keys(&armadillo5x0_button_data);
	imx31_add_imx_i2c1(NULL);
	/* Register UART */
	imx31_add_imx_uart0(&uart_pdata);
	imx31_add_imx_uart1(&uart_pdata);
	/* SMSC9118 IRQ pin */
	/* NOTE(review): every other GPIO use in this file wraps the pin in
	 * IOMUX_TO_GPIO(); here the raw pin id is passed - confirm whether
	 * this should be IOMUX_TO_GPIO(MX31_PIN_GPIO1_0). */
	gpio_direction_input(MX31_PIN_GPIO1_0);
	/* Register SDHC */
	imx31_add_mxc_mmc(0, &sdhc_pdata);
	/* Register FB */
	imx31_add_ipu_core(&mx3_ipu_data);
	imx31_add_mx3_sdc_fb(&mx3fb_pdata);
	/* Register NOR Flash */
	mxc_register_device(&armadillo5x0_nor_flash,
			&armadillo5x0_nor_flash_pdata);
	/* Register NAND Flash */
	imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
	/* set NAND page size to 2k if not configured via boot mode pins */
	__raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR);
	/* RTC */
	/* Get RTC IRQ and register the chip; on any failure the GPIO is
	 * released and .irq stays 0, which is reported below. */
	if (gpio_request(ARMADILLO5X0_RTC_GPIO, "rtc") == 0) {
		if (gpio_direction_input(ARMADILLO5X0_RTC_GPIO) == 0)
			armadillo5x0_i2c_rtc.irq = gpio_to_irq(ARMADILLO5X0_RTC_GPIO);
		else
			gpio_free(ARMADILLO5X0_RTC_GPIO);
	}
	if (armadillo5x0_i2c_rtc.irq == 0)
		pr_warning("armadillo5x0_init: failed to get RTC IRQ\n");
	i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1);
	/* USB: each controller is only registered if its ULPI transceiver
	 * could be created. */
	usbotg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
			ULPI_OTG_DRVVBUS_EXT);
	if (usbotg_pdata.otg)
		imx31_add_mxc_ehci_otg(&usbotg_pdata);
	usbh2_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
			ULPI_OTG_DRVVBUS_EXT);
	if (usbh2_pdata.otg)
		imx31_add_mxc_ehci_hs(2, &usbh2_pdata);
}
/* Board timer setup: the Armadillo-500 runs from a 26 MHz main clock. */
static void __init armadillo5x0_timer_init(void)
{
	mx31_clocks_init(26000000);
}
static struct sys_timer armadillo5x0_timer = {
.init = armadillo5x0_timer_init,
};
MACHINE_START(ARMADILLO5X0, "Armadillo-500")
/* Maintainer: Alberto Panizzo */
.boot_params = MX3x_PHYS_OFFSET + 0x100,
.map_io = mx31_map_io,
.init_early = imx31_init_early,
.init_irq = mx31_init_irq,
.timer = &armadillo5x0_timer,
.init_machine = armadillo5x0_init,
MACHINE_END
| gpl-2.0 |
drmarble/android_kernel_bn_encore | drivers/staging/generic_serial/rio/riocmd.c | 2534 | 29373 | /*
** -----------------------------------------------------------------------------
**
** Perle Specialix driver for Linux
** ported from the existing SCO driver source
**
*
* (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
** Module : riocmd.c
** SID : 1.2
** Last Modified : 11/6/98 10:33:41
** Retrieved : 11/6/98 10:33:49
**
** ident @(#)riocmd.c 1.2
**
** -----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/string.h>
#include <asm/uaccess.h>
#include <linux/termios.h>
#include <linux/serial.h>
#include <linux/generic_serial.h>
#include "linux_compat.h"
#include "rio_linux.h"
#include "pkt.h"
#include "daemon.h"
#include "rio.h"
#include "riospace.h"
#include "cmdpkt.h"
#include "map.h"
#include "rup.h"
#include "port.h"
#include "riodrvr.h"
#include "rioinfo.h"
#include "func.h"
#include "errors.h"
#include "pci.h"
#include "parmmap.h"
#include "unixrup.h"
#include "board.h"
#include "host.h"
#include "phb.h"
#include "link.h"
#include "cmdblk.h"
#include "route.h"
#include "cirrus.h"
/* File-scope scratch buffers for ioctl copy_from_user() argument
 * blocks; being static (not on the stack) they make the ioctl paths
 * below non-reentrant. */
static struct IdentifyRta IdRta;
static struct KillNeighbour KillUnit;
int RIOFoadRta(struct Host *HostP, struct Map *MapP)
{
struct CmdBlk *CmdBlkP;
rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA\n");
CmdBlkP = RIOGetCmdBlk();
if (!CmdBlkP) {
rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA: GetCmdBlk failed\n");
return -ENXIO;
}
CmdBlkP->Packet.dest_unit = MapP->ID;
CmdBlkP->Packet.dest_port = BOOT_RUP;
CmdBlkP->Packet.src_unit = 0;
CmdBlkP->Packet.src_port = BOOT_RUP;
CmdBlkP->Packet.len = 0x84;
CmdBlkP->Packet.data[0] = IFOAD;
CmdBlkP->Packet.data[1] = 0;
CmdBlkP->Packet.data[2] = IFOAD_MAGIC & 0xFF;
CmdBlkP->Packet.data[3] = (IFOAD_MAGIC >> 8) & 0xFF;
if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA: Failed to queue foad command\n");
return -EIO;
}
return 0;
}
int RIOZombieRta(struct Host *HostP, struct Map *MapP)
{
struct CmdBlk *CmdBlkP;
rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA\n");
CmdBlkP = RIOGetCmdBlk();
if (!CmdBlkP) {
rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA: GetCmdBlk failed\n");
return -ENXIO;
}
CmdBlkP->Packet.dest_unit = MapP->ID;
CmdBlkP->Packet.dest_port = BOOT_RUP;
CmdBlkP->Packet.src_unit = 0;
CmdBlkP->Packet.src_port = BOOT_RUP;
CmdBlkP->Packet.len = 0x84;
CmdBlkP->Packet.data[0] = ZOMBIE;
CmdBlkP->Packet.data[1] = 0;
CmdBlkP->Packet.data[2] = ZOMBIE_MAGIC & 0xFF;
CmdBlkP->Packet.data[3] = (ZOMBIE_MAGIC >> 8) & 0xFF;
if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA: Failed to queue zombie command\n");
return -EIO;
}
return 0;
}
/*
 * RIOCommandRta - locate the RTA with unique number RtaUnique on any
 * host and, provided it has at least one live topology link, invoke
 * func on it.
 *
 * Returns func's result for the first reachable match, 0 when
 * RtaUnique is zero, or -ENXIO when no reachable unit was found.
 */
int RIOCommandRta(struct rio_info *p, unsigned long RtaUnique, int (*func) (struct Host * HostP, struct Map * MapP))
{
	unsigned int host;

	rio_dprintk(RIO_DEBUG_CMD, "Command RTA 0x%lx func %p\n", RtaUnique, func);

	if (!RtaUnique)
		return (0);

	for (host = 0; host < p->RIONumHosts; host++) {
		struct Host *HostP = &p->RIOHosts[host];
		unsigned int rta;

		for (rta = 0; rta < RTAS_PER_HOST; rta++) {
			struct Map *MapP = &HostP->Mapping[rta];
			uint link;

			if (MapP->RtaUniqueNum != RtaUnique)
				continue;
			/*
			 * Now check we have a route to it: if the routing
			 * code is working, a connected unit has at least
			 * one topology entry with a legitimate unit
			 * number.  We care not where the route goes - any
			 * connection means we can reach it.
			 */
			for (link = 0; link < LINKS_PER_UNIT; link++)
				if (MapP->Topology[link].Unit <= (u8) MAX_RUP)
					/* Worth trying the operation... */
					return (*func) (HostP, MapP);
		}
	}
	return -ENXIO;
}
/*
 * RIOIdentifyRta - ioctl backend: copy an IdentifyRta request from
 * userspace and queue an IDENTIFY command (flash the LEDs) to the
 * matching RTA.
 *
 * Returns 0 on success, -EFAULT on copy-in failure, -ENXIO/-EIO on
 * allocation or queueing failure, -ENOENT when no reachable RTA with
 * the requested unique number exists.  Uses the file-scope IdRta
 * buffer, so concurrent callers race on it.
 */
int RIOIdentifyRta(struct rio_info *p, void __user * arg)
{
	unsigned int Host;

	if (copy_from_user(&IdRta, arg, sizeof(IdRta))) {
		rio_dprintk(RIO_DEBUG_CMD, "RIO_IDENTIFY_RTA copy failed\n");
		p->RIOError.Error = COPYIN_FAILED;
		return -EFAULT;
	}
	/* Scan every mapping slot of every host for the unique number. */
	for (Host = 0; Host < p->RIONumHosts; Host++) {
		unsigned int Rta;
		struct Host *HostP = &p->RIOHosts[Host];
		for (Rta = 0; Rta < RTAS_PER_HOST; Rta++) {
			struct Map *MapP = &HostP->Mapping[Rta];
			if (MapP->RtaUniqueNum == IdRta.RtaUnique) {
				uint Link;
				/*
				** now, lets just check we have a route to it...
				** IF the routing stuff is working, then one of the
				** topology entries for this unit will have a legit
				** route *somewhere*. We care not where - if its got
				** any connections, we can get to it.
				*/
				for (Link = 0; Link < LINKS_PER_UNIT; Link++) {
					if (MapP->Topology[Link].Unit <= (u8) MAX_RUP) {
						/*
						** Its worth trying the operation...
						*/
						struct CmdBlk *CmdBlkP;
						rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA\n");
						CmdBlkP = RIOGetCmdBlk();
						if (!CmdBlkP) {
							rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA: GetCmdBlk failed\n");
							return -ENXIO;
						}
						/* Build the IDENTIFY boot-rup packet. */
						CmdBlkP->Packet.dest_unit = MapP->ID;
						CmdBlkP->Packet.dest_port = BOOT_RUP;
						CmdBlkP->Packet.src_unit = 0;
						CmdBlkP->Packet.src_port = BOOT_RUP;
						CmdBlkP->Packet.len = 0x84;
						CmdBlkP->Packet.data[0] = IDENTIFY;
						CmdBlkP->Packet.data[1] = 0;
						CmdBlkP->Packet.data[2] = IdRta.ID;
						if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
							rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA: Failed to queue command\n");
							return -EIO;
						}
						return 0;
					}
				}
			}
		}
	}
	return -ENOENT;
}
/*
 * RIOKillNeighbour - ioctl backend: send a UFOAD command out of the
 * given link of a host or RTA, killing the unit on the far end.
 *
 * The target is identified by KillUnit.UniqueNum; if it matches a
 * host the command is queued on the host's link rup, otherwise on
 * the matching RTA's rup.  Returns 0 on success, -EFAULT on copy-in
 * failure, -ENXIO for a bad link or unknown unit, -EIO on queueing
 * failure.  Uses the file-scope KillUnit buffer (not reentrant).
 * Ownership note: CmdBlkP is consumed by RIOQueueCmdBlk() on the
 * queue paths; only the not-found fall-through frees it here.
 */
int RIOKillNeighbour(struct rio_info *p, void __user * arg)
{
	uint Host;
	uint ID;
	struct Host *HostP;
	struct CmdBlk *CmdBlkP;
	rio_dprintk(RIO_DEBUG_CMD, "KILL HOST NEIGHBOUR\n");
	if (copy_from_user(&KillUnit, arg, sizeof(KillUnit))) {
		rio_dprintk(RIO_DEBUG_CMD, "RIO_KILL_NEIGHBOUR copy failed\n");
		p->RIOError.Error = COPYIN_FAILED;
		return -EFAULT;
	}
	/* Only links A-D (0-3) exist. */
	if (KillUnit.Link > 3)
		return -ENXIO;
	CmdBlkP = RIOGetCmdBlk();
	if (!CmdBlkP) {
		rio_dprintk(RIO_DEBUG_CMD, "UFOAD: GetCmdBlk failed\n");
		return -ENXIO;
	}
	/* Build the UFOAD boot-rup packet; dest_unit is patched below
	 * when the target turns out to be an RTA. */
	CmdBlkP->Packet.dest_unit = 0;
	CmdBlkP->Packet.src_unit = 0;
	CmdBlkP->Packet.dest_port = BOOT_RUP;
	CmdBlkP->Packet.src_port = BOOT_RUP;
	CmdBlkP->Packet.len = 0x84;
	CmdBlkP->Packet.data[0] = UFOAD;
	CmdBlkP->Packet.data[1] = KillUnit.Link;
	CmdBlkP->Packet.data[2] = UFOAD_MAGIC & 0xFF;
	CmdBlkP->Packet.data[3] = (UFOAD_MAGIC >> 8) & 0xFF;
	for (Host = 0; Host < p->RIONumHosts; Host++) {
		ID = 0;
		HostP = &p->RIOHosts[Host];
		/* Target is the host itself: queue on its link rup. */
		if (HostP->UniqueNum == KillUnit.UniqueNum) {
			if (RIOQueueCmdBlk(HostP, RTAS_PER_HOST + KillUnit.Link, CmdBlkP) == RIO_FAIL) {
				rio_dprintk(RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
				return -EIO;
			}
			return 0;
		}
		/* Otherwise look for a matching RTA on this host. */
		for (ID = 0; ID < RTAS_PER_HOST; ID++) {
			if (HostP->Mapping[ID].RtaUniqueNum == KillUnit.UniqueNum) {
				CmdBlkP->Packet.dest_unit = ID + 1;
				if (RIOQueueCmdBlk(HostP, ID, CmdBlkP) == RIO_FAIL) {
					rio_dprintk(RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
					return -EIO;
				}
				return 0;
			}
		}
	}
	RIOFreeCmdBlk(CmdBlkP);
	return -ENXIO;
}
int RIOSuspendBootRta(struct Host *HostP, int ID, int Link)
{
struct CmdBlk *CmdBlkP;
rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA ID %d, link %c\n", ID, 'A' + Link);
CmdBlkP = RIOGetCmdBlk();
if (!CmdBlkP) {
rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: GetCmdBlk failed\n");
return -ENXIO;
}
CmdBlkP->Packet.dest_unit = ID;
CmdBlkP->Packet.dest_port = BOOT_RUP;
CmdBlkP->Packet.src_unit = 0;
CmdBlkP->Packet.src_port = BOOT_RUP;
CmdBlkP->Packet.len = 0x84;
CmdBlkP->Packet.data[0] = IWAIT;
CmdBlkP->Packet.data[1] = Link;
CmdBlkP->Packet.data[2] = IWAIT_MAGIC & 0xFF;
CmdBlkP->Packet.data[3] = (IWAIT_MAGIC >> 8) & 0xFF;
if (RIOQueueCmdBlk(HostP, ID - 1, CmdBlkP) == RIO_FAIL) {
rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: Failed to queue iwait command\n");
return -EIO;
}
return 0;
}
/*
 * RIOFoadWakeup - clear the software state of every port, taking each
 * port's spinlock while its fields are reset.  Always returns 0.
 */
int RIOFoadWakeup(struct rio_info *p)
{
	int i;
	unsigned long flags;

	for (i = 0; i < RIO_PORTS; i++) {
		struct Port *PortP = p->RIOPortp[i];

		rio_spin_lock_irqsave(&PortP->portSem, flags);
		PortP->Config = 0;
		PortP->State = 0;
		PortP->InUse = NOT_INUSE;
		PortP->PortState = 0;
		PortP->FlushCmdBodge = 0;
		PortP->ModemLines = 0;
		PortP->ModemState = 0;
		PortP->CookMode = 0;
		PortP->ParamSem = 0;
		PortP->Mapped = 0;
		PortP->WflushFlag = 0;
		PortP->MagicFlags = 0;
		PortP->RxDataStart = 0;
		PortP->TxBufferIn = 0;
		PortP->TxBufferOut = 0;
		rio_spin_unlock_irqrestore(&PortP->portSem, flags);
	}
	return (0);
}
/*
** Incoming command on the COMMAND_RUP to be processed.
*/
/*
 * RIOCommandRup - process one incoming command packet on COMMAND_RUP.
 *
 * Maps the packet's host port number to a system port, then handles
 * the command (break, command-complete, modem status change) under
 * the port's spinlock.  Always returns 1, which tells the caller to
 * free the packet.
 */
static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struct PKT __iomem *PacketP)
{
	struct PktCmd __iomem *PktCmdP = (struct PktCmd __iomem *)PacketP->data;
	struct Port *PortP;
	struct UnixRup *UnixRupP;
	unsigned short SysPort;
	unsigned short ReportedModemStatus;
	unsigned short rup;
	unsigned short subCommand;
	unsigned long flags;
	func_enter();
	/*
	** 16 port RTA note:
	** Command rup packets coming from the RTA will have pkt->data[1] (which
	** translates to PktCmdP->PhbNum) set to the host port number for the
	** particular unit. To access the correct BaseSysPort for a 16 port RTA,
	** we can use PhbNum to get the rup number for the appropriate 8 port
	** block (for the first block, this should be equal to 'Rup').
	*/
	rup = readb(&PktCmdP->PhbNum) / (unsigned short) PORTS_PER_RTA;
	UnixRupP = &HostP->UnixRups[rup];
	SysPort = UnixRupP->BaseSysPort + (readb(&PktCmdP->PhbNum) % (unsigned short) PORTS_PER_RTA);
	rio_dprintk(RIO_DEBUG_CMD, "Command on rup %d, port %d\n", rup, SysPort);
	/* An unmapped rup means we cannot attribute the packet to any
	 * port: dump full diagnostics and bail out. */
	if (UnixRupP->BaseSysPort == NO_PORT) {
		rio_dprintk(RIO_DEBUG_CMD, "OBSCURE ERROR!\n");
		rio_dprintk(RIO_DEBUG_CMD, "Diagnostics follow. Please WRITE THESE DOWN and report them to Specialix Technical Support\n");
		rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: Host number %Zd, name ``%s''\n", HostP - p->RIOHosts, HostP->Name);
		rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: Rup number 0x%x\n", rup);
		if (Rup < (unsigned short) MAX_RUP) {
			rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: This is the RUP for RTA ``%s''\n", HostP->Mapping[Rup].Name);
		} else
			rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: This is the RUP for link ``%c'' of host ``%s''\n", ('A' + Rup - MAX_RUP), HostP->Name);
		rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Destination 0x%x:0x%x\n", readb(&PacketP->dest_unit), readb(&PacketP->dest_port));
		rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Source 0x%x:0x%x\n", readb(&PacketP->src_unit), readb(&PacketP->src_port));
		rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Length 0x%x (%d)\n", readb(&PacketP->len), readb(&PacketP->len));
		rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Control 0x%x (%d)\n", readb(&PacketP->control), readb(&PacketP->control));
		rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Check 0x%x (%d)\n", readw(&PacketP->csum), readw(&PacketP->csum));
		rio_dprintk(RIO_DEBUG_CMD, "COMMAND information: Host Port Number 0x%x, " "Command Code 0x%x\n", readb(&PktCmdP->PhbNum), readb(&PktCmdP->Command));
		return 1;
	}
	PortP = p->RIOPortp[SysPort];
	rio_spin_lock_irqsave(&PortP->portSem, flags);
	switch (readb(&PktCmdP->Command)) {
	case RIOC_BREAK_RECEIVED:
		rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n");
		/* If the current line disc. is not multi-threading and
		   the current processor is not the default, reset rup_intr
		   and return 0 to ensure that the command packet is
		   not freed. */
		/* Call tmgr HANGUP HERE */
		/* Fix this later when every thing works !!!! RAMRAJ */
		gs_got_break(&PortP->gs);
		break;
	case RIOC_COMPLETE:
		rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %Zd\n", readb(&PktCmdP->PhbNum), HostP - p->RIOHosts);
		subCommand = 1;
		/* Sub-commands are fully handled here; anything else falls
		 * through to the port-state / modem-status processing. */
		switch (readb(&PktCmdP->SubCommand)) {
		case RIOC_MEMDUMP:
			rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", readb(&PktCmdP->SubCommand), readw(&PktCmdP->SubAddr));
			break;
		case RIOC_READ_REGISTER:
			rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", readw(&PktCmdP->SubAddr));
			p->CdRegister = (readb(&PktCmdP->ModemStatus) & RIOC_MSVR1_HOST);
			break;
		default:
			subCommand = 0;
			break;
		}
		if (subCommand)
			break;
		rio_dprintk(RIO_DEBUG_CMD, "New status is 0x%x was 0x%x\n", readb(&PktCmdP->PortStatus), PortP->PortState);
		if (PortP->PortState != readb(&PktCmdP->PortStatus)) {
			rio_dprintk(RIO_DEBUG_CMD, "Mark status & wakeup\n");
			PortP->PortState = readb(&PktCmdP->PortStatus);
			/* What should we do here ...
			   wakeup( &PortP->PortState );
			 */
		} else
			rio_dprintk(RIO_DEBUG_CMD, "No change\n");
		/* FALLTHROUGH */
	case RIOC_MODEM_STATUS:
		/*
		** Knock out the tbusy and tstop bits, as these are not relevant
		** to the check for modem status change (they're just there because
		** it's a convenient place to put them!).
		*/
		ReportedModemStatus = readb(&PktCmdP->ModemStatus);
		if ((PortP->ModemState & RIOC_MSVR1_HOST) ==
				(ReportedModemStatus & RIOC_MSVR1_HOST)) {
			rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState);
			/*
			** Update ModemState just in case tbusy or tstop states have
			** changed.
			*/
			PortP->ModemState = ReportedModemStatus;
		} else {
			rio_dprintk(RIO_DEBUG_CMD, "Modem status change from 0x%x to 0x%x\n", PortP->ModemState, ReportedModemStatus);
			PortP->ModemState = ReportedModemStatus;
#ifdef MODEM_SUPPORT
			if (PortP->Mapped) {
				/***********************************************************\
				*************************************************************
				***                                                       ***
				***         M O D E M   S T A T E   C H A N G E          ***
				***                                                       ***
				*************************************************************
				\***********************************************************/
				/*
				** If the device is a modem, then check the modem
				** carrier.
				*/
				if (PortP->gs.port.tty == NULL)
					break;
				if (PortP->gs.port.tty->termios == NULL)
					break;
				if (!(PortP->gs.port.tty->termios->c_cflag & CLOCAL) && ((PortP->State & (RIO_MOPEN | RIO_WOPEN)))) {
					rio_dprintk(RIO_DEBUG_CMD, "Is there a Carrier?\n");
					/*
					** Is there a carrier?
					*/
					if (PortP->ModemState & RIOC_MSVR1_CD) {
						/*
						** Has carrier just appeared?
						*/
						if (!(PortP->State & RIO_CARR_ON)) {
							rio_dprintk(RIO_DEBUG_CMD, "Carrier just came up.\n");
							PortP->State |= RIO_CARR_ON;
							/*
							** wakeup anyone in WOPEN
							*/
							if (PortP->State & (PORT_ISOPEN | RIO_WOPEN))
								wake_up_interruptible(&PortP->gs.port.open_wait);
						}
					} else {
						/*
						** Has carrier just dropped?
						*/
						if (PortP->State & RIO_CARR_ON) {
							if (PortP->State & (PORT_ISOPEN | RIO_WOPEN | RIO_MOPEN))
								tty_hangup(PortP->gs.port.tty);
							PortP->State &= ~RIO_CARR_ON;
							rio_dprintk(RIO_DEBUG_CMD, "Carrirer just went down\n");
						}
					}
				}
			}
#endif
		}
		break;
	default:
		rio_dprintk(RIO_DEBUG_CMD, "Unknown command %d on CMD_RUP of host %Zd\n", readb(&PktCmdP->Command), HostP - p->RIOHosts);
		break;
	}
	rio_spin_unlock_irqrestore(&PortP->portSem, flags);
	func_exit();
	return 1;
}
/*
** The command mechanism:
** Each rup has a chain of commands associated with it.
** This chain is maintained by routines in this file.
** Periodically we are called and we run a quick check of all the
** active chains to determine if there is a command to be executed,
** and if the rup is ready to accept it.
**
*/
/*
** Allocate an empty command block.
*/
/*
 * RIOGetCmdBlk - allocate a zero-filled command block.
 *
 * Uses GFP_ATOMIC because callers may hold spinlocks; returns NULL
 * when memory is not available.
 */
struct CmdBlk *RIOGetCmdBlk(void)
{
	return kzalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
}
/*
** Return a block to the head of the free list.
*/
/*
 * RIOFreeCmdBlk - release a command block obtained from RIOGetCmdBlk().
 * Safe to call with NULL (kfree is a no-op on NULL).
 */
void RIOFreeCmdBlk(struct CmdBlk *CmdBlkP)
{
	kfree(CmdBlkP);
}
/*
** attach a command block to the list of commands to be performed for
** a given rup.
*/
/*
 * RIOQueueCmdBlk - attach a command block to the list of commands to
 * be performed for a given rup.
 *
 * If the rup is idle (nothing waiting, nothing pending, transmit
 * control inactive, and the block's pre-function - if any - agrees)
 * the packet is copied straight onto the rup and marked pending;
 * otherwise it is appended to the rup's waiting list.
 *
 * Ownership: on success the block belongs to the rup machinery; on
 * an illegal rup number the block is freed here and RIO_FAIL is
 * returned.  Returns 0 otherwise.
 */
int RIOQueueCmdBlk(struct Host *HostP, uint Rup, struct CmdBlk *CmdBlkP)
{
	struct CmdBlk **Base;
	struct UnixRup *UnixRupP;
	unsigned long flags;
	if (Rup >= (unsigned short) (MAX_RUP + LINKS_PER_UNIT)) {
		rio_dprintk(RIO_DEBUG_CMD, "Illegal rup number %d in RIOQueueCmdBlk\n", Rup);
		RIOFreeCmdBlk(CmdBlkP);
		return RIO_FAIL;
	}
	UnixRupP = &HostP->UnixRups[Rup];
	rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
	/*
	** If the RUP is currently inactive, then put the request
	** straight on the RUP....
	*/
	if ((UnixRupP->CmdsWaitingP == NULL) && (UnixRupP->CmdPendingP == NULL) && (readw(&UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE) && (CmdBlkP->PreFuncP ? (*CmdBlkP->PreFuncP) (CmdBlkP->PreArg, CmdBlkP)
			: 1)) {
		rio_dprintk(RIO_DEBUG_CMD, "RUP inactive-placing command straight on. Cmd byte is 0x%x\n", CmdBlkP->Packet.data[0]);
		/*
		** Whammy! blat that pack!
		*/
		HostP->Copy(&CmdBlkP->Packet, RIO_PTR(HostP->Caddr, readw(&UnixRupP->RupP->txpkt)), sizeof(struct PKT));
		/*
		** place command packet on the pending position.
		*/
		UnixRupP->CmdPendingP = CmdBlkP;
		/*
		** set the command register
		*/
		writew(TX_PACKET_READY, &UnixRupP->RupP->txcontrol);
		rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
		return 0;
	}
	rio_dprintk(RIO_DEBUG_CMD, "RUP active - en-queing\n");
	if (UnixRupP->CmdsWaitingP != NULL)
		rio_dprintk(RIO_DEBUG_CMD, "Rup active - command waiting\n");
	if (UnixRupP->CmdPendingP != NULL)
		rio_dprintk(RIO_DEBUG_CMD, "Rup active - command pending\n");
	if (readw(&UnixRupP->RupP->txcontrol) != TX_RUP_INACTIVE)
		rio_dprintk(RIO_DEBUG_CMD, "Rup active - command rup not ready\n");
	/* Walk to the tail of the singly-linked waiting list and append. */
	Base = &UnixRupP->CmdsWaitingP;
	rio_dprintk(RIO_DEBUG_CMD, "First try to queue cmdblk %p at %p\n", CmdBlkP, Base);
	while (*Base) {
		rio_dprintk(RIO_DEBUG_CMD, "Command cmdblk %p here\n", *Base);
		Base = &((*Base)->NextP);
		rio_dprintk(RIO_DEBUG_CMD, "Now try to queue cmd cmdblk %p at %p\n", CmdBlkP, Base);
	}
	rio_dprintk(RIO_DEBUG_CMD, "Will queue cmdblk %p at %p\n", CmdBlkP, Base);
	*Base = CmdBlkP;
	CmdBlkP->NextP = NULL;
	rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
	return 0;
}
/*
** Here we go - if there is an empty rup, fill it!
** must be called at splrio() or higher.
**
** For each RUP (remote unit port) on the host this routine:
**  1) dispatches any incoming packet (boot/command/route) and recycles it,
**  2) completes a pending command whose transmit has finished, calling the
**     command block's post-function and returning the block to the freelist,
**  3) starts the next queued command if the RUP is now idle.
** The RUP lock is dropped around the boot/command/route handlers and the
** pre/post callbacks because those paths may indirectly take it again.
*/
void RIOPollHostCommands(struct rio_info *p, struct Host *HostP)
{
	struct CmdBlk *CmdBlkP;
	struct UnixRup *UnixRupP;
	struct PKT __iomem *PacketP;
	unsigned short Rup;
	unsigned long flags;

	Rup = MAX_RUP + LINKS_PER_UNIT;

	do {			/* do this loop for each RUP */
		/*
		** locate the rup we are processing & lock it
		** NOTE(review): taken with spin_lock_irqsave() but released
		** inside the loop with rio_spin_unlock_irqrestore() -
		** presumably the rio_ variants are aliases; confirm.
		*/
		UnixRupP = &HostP->UnixRups[--Rup];
		spin_lock_irqsave(&UnixRupP->RupLock, flags);

		/*
		** First check for incoming commands:
		*/
		if (readw(&UnixRupP->RupP->rxcontrol) != RX_RUP_INACTIVE) {
			int FreeMe;

			PacketP = (struct PKT __iomem *) RIO_PTR(HostP->Caddr, readw(&UnixRupP->RupP->rxpkt));

			/* dispatch on the packet's destination rup */
			switch (readb(&PacketP->dest_port)) {
			case BOOT_RUP:
				rio_dprintk(RIO_DEBUG_CMD, "Incoming Boot %s packet '%x'\n", readb(&PacketP->len) & 0x80 ? "Command" : "Data", readb(&PacketP->data[0]));
				rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
				FreeMe = RIOBootRup(p, Rup, HostP, PacketP);
				rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
				break;

			case COMMAND_RUP:
				/*
				** Free the RUP lock as loss of carrier causes a
				** ttyflush which will (eventually) call another
				** routine that uses the RUP lock.
				*/
				rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
				FreeMe = RIOCommandRup(p, Rup, HostP, PacketP);
				if (readb(&PacketP->data[5]) == RIOC_MEMDUMP) {
					/* a memory-dump reply carries 32 bytes of payload */
					rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", readw(&(PacketP->data[6])));
					rio_memcpy_fromio(p->RIOMemDump, &(PacketP->data[8]), 32);
				}
				rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
				break;

			case ROUTE_RUP:
				rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
				FreeMe = RIORouteRup(p, Rup, HostP, PacketP);
				rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
				break;

			default:
				rio_dprintk(RIO_DEBUG_CMD, "Unknown RUP %d\n", readb(&PacketP->dest_port));
				FreeMe = 1;
				break;
			}

			/* handlers return non-zero when the packet may be recycled */
			if (FreeMe) {
				rio_dprintk(RIO_DEBUG_CMD, "Free processed incoming command packet\n");
				put_free_end(HostP, PacketP);

				writew(RX_RUP_INACTIVE, &UnixRupP->RupP->rxcontrol);

				if (readw(&UnixRupP->RupP->handshake) == PHB_HANDSHAKE_SET) {
					rio_dprintk(RIO_DEBUG_CMD, "Handshake rup %d\n", Rup);
					writew(PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET, &UnixRupP->RupP->handshake);
				}
			}
		}

		/*
		** IF a command was running on the port,
		** and it has completed, then tidy it up.
		*/
		if ((CmdBlkP = UnixRupP->CmdPendingP) &&	/* ASSIGN! */
		    (readw(&UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
			/*
			** we are idle.
			** there is a command in pending.
			** Therefore, this command has finished.
			** So, wakeup whoever is waiting for it (and tell them
			** what happened).
			*/
			if (CmdBlkP->Packet.dest_port == BOOT_RUP)
				rio_dprintk(RIO_DEBUG_CMD, "Free Boot %s Command Block '%x'\n", CmdBlkP->Packet.len & 0x80 ? "Command" : "Data", CmdBlkP->Packet.data[0]);

			rio_dprintk(RIO_DEBUG_CMD, "Command %p completed\n", CmdBlkP);

			/*
			** Clear the Rup lock to prevent mutual exclusion
			** while the post-function runs.
			*/
			if (CmdBlkP->PostFuncP) {
				rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
				(*CmdBlkP->PostFuncP) (CmdBlkP->PostArg, CmdBlkP);
				rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
			}

			/*
			** ....clear the pending flag....
			*/
			UnixRupP->CmdPendingP = NULL;

			/*
			** ....and return the command block to the freelist.
			*/
			RIOFreeCmdBlk(CmdBlkP);
		}

		/*
		** If there is a command for this rup, and the rup
		** is idle, then process the command
		*/
		if ((CmdBlkP = UnixRupP->CmdsWaitingP) &&	/* ASSIGN! */
		    (UnixRupP->CmdPendingP == NULL) && (readw(&UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
			/*
			** if the pre-function is non-zero, call it.
			** If it returns RIO_FAIL then don't
			** send this command yet!
			*/
			if (!(CmdBlkP->PreFuncP ? (*CmdBlkP->PreFuncP) (CmdBlkP->PreArg, CmdBlkP) : 1)) {
				rio_dprintk(RIO_DEBUG_CMD, "Not ready to start command %p\n", CmdBlkP);
			} else {
				rio_dprintk(RIO_DEBUG_CMD, "Start new command %p Cmd byte is 0x%x\n", CmdBlkP, CmdBlkP->Packet.data[0]);
				/*
				** Whammy! blat that pack!
				*/
				HostP->Copy(&CmdBlkP->Packet, RIO_PTR(HostP->Caddr, readw(&UnixRupP->RupP->txpkt)), sizeof(struct PKT));

				/*
				** remove the command from the rup command queue...
				*/
				UnixRupP->CmdsWaitingP = CmdBlkP->NextP;

				/*
				** ...and place it on the pending position.
				*/
				UnixRupP->CmdPendingP = CmdBlkP;

				/*
				** set the command register
				*/
				writew(TX_PACKET_READY, &UnixRupP->RupP->txcontrol);

				/*
				** the command block will be freed
				** when the command has been processed.
				*/
			}
		}
		spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
	} while (Rup);
}
/*
** RIOWFlushMark - mark a write-flush as in progress on a port.
**
** Bumps the port's write-flush counter and sets MAGIC_FLUSH under the
** port lock, then drops the port's in-use count via RIOUnUse().
*/
int RIOWFlushMark(unsigned long iPortP, struct CmdBlk *CmdBlkP)
{
	struct Port *port = (struct Port *) iPortP;
	unsigned long cookie;

	rio_spin_lock_irqsave(&port->portSem, cookie);
	port->WflushFlag++;
	port->MagicFlags |= MAGIC_FLUSH;
	rio_spin_unlock_irqrestore(&port->portSem, cookie);

	return RIOUnUse(iPortP, CmdBlkP);
}
int RIORFlushEnable(unsigned long iPortP, struct CmdBlk *CmdBlkP)
{
struct Port *PortP = (struct Port *) iPortP;
struct PKT __iomem *PacketP;
unsigned long flags;
rio_spin_lock_irqsave(&PortP->portSem, flags);
while (can_remove_receive(&PacketP, PortP)) {
remove_receive(PortP);
put_free_end(PortP->HostP, PacketP);
}
if (readw(&PortP->PhbP->handshake) == PHB_HANDSHAKE_SET) {
/*
** MAGIC! (Basically, handshake the RX buffer, so that
** the RTAs upstream can be re-enabled.)
*/
rio_dprintk(RIO_DEBUG_CMD, "Util: Set RX handshake bit\n");
writew(PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET, &PortP->PhbP->handshake);
}
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return RIOUnUse(iPortP, CmdBlkP);
}
/*
** RIOUnUse - drop the in-use count on a port; kick transmit when it
** reaches NOT_INUSE. Always returns 0.
*/
int RIOUnUse(unsigned long iPortP, struct CmdBlk *CmdBlkP)
{
	struct Port *port = (struct Port *) iPortP;
	unsigned long cookie;

	rio_spin_lock_irqsave(&port->portSem, cookie);
	rio_dprintk(RIO_DEBUG_CMD, "Decrement in use count for port\n");

	if (port->InUse && --port->InUse != NOT_INUSE) {
		/* still busy - another preemptive command is outstanding */
		rio_spin_unlock_irqrestore(&port->portSem, cookie);
		return 0;
	}

	/*
	** While PortP->InUse is set (i.e. a preemptive command has been sent to
	** the RTA and is awaiting completion), any transmit data is prevented from
	** being transferred from the write queue into the transmit packets
	** (add_transmit) and no further transmit interrupt will be sent for that
	** data. The next interrupt will occur up to 500ms later (RIOIntr is called
	** twice a second as a safety measure). This was the case when kermit was
	** used to send data into a RIO port. After each packet was sent, TCFLSH
	** was called to flush the read queue preemptively. PortP->InUse was
	** incremented, thereby blocking the 6 byte acknowledgement packet
	** transmitted back. This acknowledgement hung around for 500ms before
	** being sent, thus reducing input performance substantially.
	** When PortP->InUse becomes NOT_INUSE, we must ensure that any data
	** hanging around in the transmit buffer is sent immediately.
	*/
	writew(1, &port->HostP->ParmMapP->tx_intr);

	/* What to do here ..
	   wakeup( (caddr_t)&(PortP->InUse) );
	 */
	rio_spin_unlock_irqrestore(&port->portSem, cookie);
	return 0;
}
/*
**
** How to use this file:
**
** To send a command down a rup, you need to allocate a command block, fill
** in the packet information, fill in the command number, fill in the pre-
** and post- functions and arguments, and then add the command block to the
** queue of command blocks for the port in question. When the port is idle,
** then the pre-function will be called. If this returns RIO_FAIL then the
** command will be re-queued and tried again at a later date (probably in one
** clock tick). If the pre-function returns NOT RIO_FAIL, then the command
** packet will be queued on the RUP, and the txcontrol field set to the
** command number. When the txcontrol field has changed from being the
** command number, then the post-function will be called, with the argument
** specified earlier, a pointer to the command block, and the value of
** txcontrol.
**
** To allocate a command block, call RIOGetCmdBlk(). This returns a pointer
** to the command block structure allocated, or NULL if there aren't any.
** The block will have been zeroed for you.
**
** The structure has the following fields:
**
** struct CmdBlk
** {
** struct CmdBlk *NextP; ** Pointer to next command block **
** struct PKT Packet; ** A packet, to copy to the rup **
** int (*PreFuncP)(); ** The func to call to check if OK **
** int PreArg; ** The arg for the func **
** int (*PostFuncP)(); ** The func to call when completed **
** int PostArg; ** The arg for the func **
** };
**
** You need to fill in ALL fields EXCEPT NextP, which is used to link the
** blocks together either on the free list or on the Rup list.
**
** Packet is an actual packet structure to be filled in with the packet
** information associated with the command. You need to fill in everything,
** as the command processor doesn't process the command packet in any way.
**
** The PreFuncP is called before the packet is enqueued on the host rup.
** PreFuncP is called as (*PreFuncP)(PreArg, CmdBlkP);. PreFuncP must
** return !RIO_FAIL to have the packet queued on the rup, and RIO_FAIL
** if the packet is NOT to be queued.
**
** The PostFuncP is called when the command has completed. It is called
** as (*PostFuncP)(PostArg, CmdBlkP); (this matches the invocation in
** RIOPollHostCommands). PostFuncP is not expected
** to return a value. PostFuncP does NOT need to free the command block,
** as this happens automatically after PostFuncP returns.
**
** Once the command block has been filled in, it is attached to the correct
** queue by calling RIOQueueCmdBlk( HostP, Rup, CmdBlkP ) where HostP is
** a pointer to the struct Host, Rup is the NUMBER of the rup (NOT a pointer
** to it!), and CmdBlkP is the pointer to the command block allocated using
** RIOGetCmdBlk().
**
*/
| gpl-2.0 |
opinsys/opinsys-linux | arch/sh/kernel/cpu/sh4a/clock-sh7786.c | 2790 | 6974 | /*
* arch/sh/kernel/cpu/sh4a/clock-sh7786.c
*
* SH7786 support for the clock framework
*
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
/*
 * Default rate for the root input clock (EXTAL, 33.33 MHz); reset this
 * with clk_set_rate() from the platform code.
 */
static struct clk extal_clk = {
	.rate		= 33333333,
};
/*
 * Recalculate the PLL1 output rate from the parent (EXTAL) rate.
 *
 * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1,
 * while modes 3, 4, and 5 use an x32.
 */
static unsigned long pll_recalc(struct clk *clk)
{
	int mult;

	if ((sh_mv.mv_mode_pins() & 0xf) < 3)
		mult = 64;
	else
		mult = 32;

	return clk->parent->rate * mult;
}
static struct sh_clk_ops pll_clk_ops = {
	.recalc		= pll_recalc,
};

/* PLL1: EXTAL multiplied by pll_recalc() according to the mode pins */
static struct clk pll_clk = {
	.ops		= &pll_clk_ops,
	.parent		= &extal_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* root clocks registered directly by arch_clk_init() */
static struct clk *clks[] = {
	&extal_clk,
	&pll_clk,
};

/* divisor values selectable for the DIV4 clocks below */
static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
			       24, 32, 36, 48 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors = div2,
	.nr_divisors = ARRAY_SIZE(div2),
};

static struct clk_div4_table div4_table = {
	.div_mult_table = &div4_div_mult_table,
};

enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };

/* a divider clock fed by pll_clk, controlled via a field of FRQMR1 at _bit */
#define DIV4(_bit, _mask, _flags) \
	SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)

struct clk div4_clks[DIV4_NR] = {
	[DIV4_P] = DIV4(0, 0x0b40, 0),
	[DIV4_DU] = DIV4(4, 0x0010, 0),
	[DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT),
	[DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT),
	[DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT),
	[DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT),
};
/* module stop control register addresses */
#define MSTPCR0		0xffc40030
#define MSTPCR1		0xffc40034

/* one enumerator per controlled module stop bit; MSTPxyy = register x, bit yy */
enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
       MSTP023, MSTP022, MSTP021, MSTP020, MSTP017, MSTP016,
       MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008,
       MSTP005, MSTP004, MSTP002,
       MSTP112, MSTP110, MSTP109, MSTP108,
       MSTP105, MSTP104, MSTP103, MSTP102,
       MSTP_NR };

static struct clk mstp_clks[MSTP_NR] = {
	/* MSTPCR0 - all parented on the peripheral (DIV4_P) clock */
	[MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
	[MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
	[MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
	[MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
	[MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
	[MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
	[MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
	[MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
	[MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
	[MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
	[MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
	[MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
	[MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
	[MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
	[MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
	[MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
	[MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
	[MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
	[MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
	[MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
	[MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),

	/* MSTPCR1 - no parent clock recorded */
	[MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0),
	[MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0),
	[MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0),
	[MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0),
	[MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
	[MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
	[MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0),
	[MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0),
};
/* clkdev lookup table mapping connection/device ids onto the clocks above */
static struct clk_lookup lookups[] = {
	/* main clocks */
	CLKDEV_CON_ID("extal", &extal_clk),
	CLKDEV_CON_ID("pll_clk", &pll_clk),

	/* DIV4 clocks */
	CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
	CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
	CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
	CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
	CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),

	/* MSTP32 clocks */
	CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
	CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]),
	CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]),
	CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
	CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
	CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
	CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
	CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]),
	CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]),
	/* TMU channels share an MSTP bit in groups of three */
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.0", &mstp_clks[MSTP008]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.1", &mstp_clks[MSTP008]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.2", &mstp_clks[MSTP008]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.3", &mstp_clks[MSTP009]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.4", &mstp_clks[MSTP009]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.5", &mstp_clks[MSTP009]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.6", &mstp_clks[MSTP010]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.7", &mstp_clks[MSTP010]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.8", &mstp_clks[MSTP010]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.9", &mstp_clks[MSTP011]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.10", &mstp_clks[MSTP011]),
	CLKDEV_ICK_ID("tmu_fck", "sh_tmu.11", &mstp_clks[MSTP011]),
	CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]),
	CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]),
	CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
	CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]),
	CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]),
	CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]),
	CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]),
	CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
	CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
	CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]),
	CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]),
};
/*
 * Register the root clocks and clkdev lookups, then the DIV4 dividers
 * and module-stop gate clocks. Returns 0 on success; a later stage is
 * only attempted if everything before it succeeded.
 */
int __init arch_clk_init(void)
{
	int idx;
	int rc = 0;

	for (idx = 0; idx < ARRAY_SIZE(clks); idx++)
		rc |= clk_register(clks[idx]);

	/* lookups are added unconditionally, matching the original flow */
	for (idx = 0; idx < ARRAY_SIZE(lookups); idx++)
		clkdev_add(&lookups[idx]);

	if (!rc)
		rc = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
					  &div4_table);
	if (!rc)
		rc = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	return rc;
}
| gpl-2.0 |
Euphoria-OS-Devices/android_kernel_motorola_msm8226 | arch/m68k/platform/68EZ328/config.c | 4582 | 1971 | /***************************************************************************/
/*
* linux/arch/m68knommu/platform/68EZ328/config.c
*
* Copyright (C) 1993 Hamish Macdonald
* Copyright (C) 1999 D. Jeff Dionne
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/***************************************************************************/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68EZ328.h>
#ifdef CONFIG_UCSIMM
#include <asm/bootstd.h>
#endif
/***************************************************************************/
int m68328_hwclk(int set, struct rtc_time *t);
/***************************************************************************/
/*
 * Reset the board by re-entering the code at 0x10c00000.
 *
 * NOTE(review): with interrupts off, a byte is written to 0xFFFFF300
 * (presumably a DragonBall-EZ control register - confirm against the
 * MC68EZ328 manual), then the stack pointer and entry address are
 * loaded from the first two longwords at 0x10c00000 and jumped to.
 */
void m68ez328_reset(void)
{
	local_irq_disable();
	asm volatile (
		"moveal #0x10c00000, %a0;\n"
		"moveb #0, 0xFFFFF300;\n"
		"moveal 0(%a0), %sp;\n"
		"moveal 4(%a0), %a0;\n"
		"jmp (%a0);\n"
	);
}
/***************************************************************************/
/* hardware (MAC) address handed to the CS8900A driver; set in config_BSP() */
unsigned char *cs8900a_hwaddr;
/* NOTE(review): file-local 'errno' - presumably consumed by the _bsc*
 * bootloader-syscall macros below; it shadows the libc name on purpose. */
static int errno;

#ifdef CONFIG_UCSIMM
/* bootloader system-call stubs: serial number, MAC address, boot env var */
_bsc0(char *, getserialnum)
_bsc1(unsigned char *, gethwaddr, int, a)
_bsc1(char *, getbenv, char *, a)
#endif
/*
 * Board-support setup: print banner, fetch the boot command line and
 * hardware address from the bootloader (uCsimm only), and install the
 * machine hwclk/reset hooks.
 *
 * NOTE(review): 'len' (size of 'command') is never checked before the
 * strcpy() below - assumes the APPEND env var fits; confirm.
 */
void config_BSP(char *command, int len)
{
	unsigned char *p;	/* unused unless CONFIG_UCSIMM is set */

	printk(KERN_INFO "\n68EZ328 DragonBallEZ support (C) 1999 Rt-Control, Inc\n");

#ifdef CONFIG_UCSIMM
	printk(KERN_INFO "uCsimm serial string [%s]\n",getserialnum());
	p = cs8900a_hwaddr = gethwaddr(0);
	printk(KERN_INFO "uCsimm hwaddr %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
		p[0], p[1], p[2], p[3], p[4], p[5]);
	/* copy the bootloader's APPEND variable as the kernel command line */
	p = getbenv("APPEND");
	if (p) strcpy(p,command);
	else command[0] = 0;
#endif

	mach_hwclk = m68328_hwclk;
	mach_reset = m68ez328_reset;
}
/***************************************************************************/
| gpl-2.0 |
GalaxyTab4/starlightknight_kernel_samsung_matissewifi | drivers/misc/ti-st/st_kim.c | 4838 | 24235 | /*
* Shared Transport Line discipline driver Core
* Init Manager module responsible for GPIO control
* and firmware download
* Copyright (C) 2009-2010 Texas Instruments
* Author: Pavan Savoy <pavan_savoy@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define pr_fmt(fmt) "(stk) :" fmt
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/gpio.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/tty.h>
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/module.h>
#define MAX_ST_DEVICES	3	/* Imagine 1 on each UART for now */
/* platform devices registered per ST instance, indexed by id */
static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
/**********************************************************************/
/* internal functions */
/**
 * st_get_plat_device -
 * function which returns the reference to the platform device
 * requested by id. As of now only 1 such device exists (id=0)
 * the context requesting for reference can get the id to be
 * requested by a. The protocol driver which is registering or
 * b. the tty device which is opened.
 */
static struct platform_device *st_get_plat_device(int id)
{
	/* NOTE(review): no bounds check against MAX_ST_DEVICES; id is trusted */
	return st_kim_devices[id];
}
/**
 * validate_firmware_response -
 * function to return whether the firmware response was proper
 * in case of error don't complete so that waiting for proper
 * response times out
 */
void validate_firmware_response(struct kim_data_s *kim_gdata)
{
	struct sk_buff *skb = kim_gdata->rx_skb;

	if (skb->data[5] == 0) {
		/* success status: wake everyone waiting on this response
		 * (complete_all becos of all the script being downloaded) */
		complete_all(&kim_gdata->kim_rcvd);
	} else {
		/* error status: drop it and keep waiting for a good one */
		pr_err("no proper response during fw download");
		pr_err("data6 %x", skb->data[5]);
	}
	kfree_skb(skb);
}
/* check for data len received inside kim_int_recv
 * most often hit the last case to update state to waiting for data
 *
 * Returns the number of payload bytes still to be read, or 0 when the
 * receive state machine has been reset (packet complete or dropped).
 */
static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
{
	register int room = skb_tailroom(kim_gdata->rx_skb);

	pr_debug("len %d room %d", len, room);

	if (!len) {
		/* zero-length payload: the event is already complete */
		validate_firmware_response(kim_gdata);
	} else if (len > room) {
		/* Received packet's payload length is larger.
		 * We can't accommodate it in created skb.
		 */
		pr_err("Data length is too large len %d room %d", len,
			   room);
		kfree_skb(kim_gdata->rx_skb);
	} else {
		/* Packet header has non-zero payload length and
		 * we have enough space in created skb. Lets read
		 * payload data */
		kim_gdata->rx_state = ST_W4_DATA;
		kim_gdata->rx_count = len;
		return len;
	}

	/* Change ST LL state to continue to process next
	 * packet */
	kim_gdata->rx_state = ST_W4_PACKET_TYPE;
	kim_gdata->rx_skb = NULL;
	kim_gdata->rx_count = 0;

	return 0;
}
/**
 * kim_int_recv - receive function called during firmware download
 * firmware download responses on different UART drivers
 * have been observed to come in bursts of different
 * tty_receive and hence the logic
 *
 * Byte-wise state machine: ST_W4_PACKET_TYPE (expecting an HCI packet
 * type byte, only 0x04 = event is handled) -> ST_W4_HEADER (2 header
 * bytes) -> ST_W4_DATA (payload). Partial packets are accumulated in
 * kim_gdata->rx_skb across calls.
 */
void kim_int_recv(struct kim_data_s *kim_gdata,
	const unsigned char *data, long count)
{
	const unsigned char *ptr;
	int len = 0, type = 0;
	unsigned char *plen;

	pr_debug("%s", __func__);
	/* Decode received bytes here */
	ptr = data;
	if (unlikely(ptr == NULL)) {
		pr_err(" received null from TTY ");
		return;
	}

	while (count) {
		if (kim_gdata->rx_count) {
			/* continue filling a partially-received packet */
			len = min_t(unsigned int, kim_gdata->rx_count, count);
			memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
			kim_gdata->rx_count -= len;
			count -= len;
			ptr += len;

			if (kim_gdata->rx_count)
				continue;	/* still short - wait for more bytes */

			/* Check ST RX state machine , where are we? */
			switch (kim_gdata->rx_state) {
				/* Waiting for complete packet ? */
			case ST_W4_DATA:
				pr_debug("Complete pkt received");
				validate_firmware_response(kim_gdata);
				kim_gdata->rx_state = ST_W4_PACKET_TYPE;
				kim_gdata->rx_skb = NULL;
				continue;
				/* Waiting for Bluetooth event header ? */
			case ST_W4_HEADER:
				/* byte 1 of the header is the payload length */
				plen =
					(unsigned char *)&kim_gdata->rx_skb->data[1];
				pr_debug("event hdr: plen 0x%02x\n", *plen);
				kim_check_data_len(kim_gdata, *plen);
				continue;
			}	/* end of switch */
		}	/* end of if rx_state */

		/* start of a new packet: first byte is the HCI packet type */
		switch (*ptr) {
			/* Bluetooth event packet? */
		case 0x04:
			kim_gdata->rx_state = ST_W4_HEADER;
			kim_gdata->rx_count = 2;	/* 2-byte event header */
			type = *ptr;
			break;
		default:
			/* anything else is skipped one byte at a time */
			pr_info("unknown packet");
			ptr++;
			count--;
			continue;
		}

		ptr++;
		count--;
		kim_gdata->rx_skb =
			alloc_skb(1024+8, GFP_ATOMIC);
		if (!kim_gdata->rx_skb) {
			pr_err("can't allocate mem for new packet");
			kim_gdata->rx_state = ST_W4_PACKET_TYPE;
			kim_gdata->rx_count = 0;
			return;
		}
		skb_reserve(kim_gdata->rx_skb, 8);
		/* stash the packet type in the skb control buffer */
		kim_gdata->rx_skb->cb[0] = 4;
		kim_gdata->rx_skb->cb[1] = 0;
	}
	return;
}
/*
 * read_local_version - query the chip's version over HCI and build the
 * firmware script name "TIInit_<chip>.<maj>.<min>.bts" into bts_scr_name.
 *
 * Returns 0 on success, -EIO if the 4-byte read-version command could
 * not be written, or -ETIMEDOUT if no response arrives in CMD_RESP_TIME.
 * The parsed fields are also stored in kim_gdata->version for sysfs.
 */
static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
{
	unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
	const char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };

	pr_debug("%s", __func__);

	INIT_COMPLETION(kim_gdata->kim_rcvd);
	if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
		pr_err("kim: couldn't write 4 bytes");
		return -EIO;
	}

	if (!wait_for_completion_timeout
	    (&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) {
		pr_err(" waiting for ver info- timed out ");
		return -ETIMEDOUT;
	}
	INIT_COMPLETION(kim_gdata->kim_rcvd);

	/* version word sits at bytes 13/14 of the stashed response */
	version =
		MAKEWORD(kim_gdata->resp_buffer[13],
				kim_gdata->resp_buffer[14]);
	chip = (version & 0x7C00) >> 10;	/* bits 14..10 */
	min_ver = (version & 0x007F);		/* bits 6..0 */
	maj_ver = (version & 0x0380) >> 7;	/* bits 9..7 */

	if (version & 0x8000)
		maj_ver |= 0x0008;	/* bit 15 extends the major version */

	sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver);

	/* to be accessed later via sysfs entry */
	kim_gdata->version.full = version;
	kim_gdata->version.chip = chip;
	kim_gdata->version.maj_ver = maj_ver;
	kim_gdata->version.min_ver = min_ver;

	pr_info("%s", bts_scr_name);
	return 0;
}
/*
 * Advance the firmware-script cursor past a change-remote-baud command's
 * trailing wait-event action (the host never changes baud mid-download,
 * so the matching response would never arrive).  On a malformed script
 * (no wait-event follows) the cursor is left untouched.
 */
void skip_change_remote_baud(unsigned char **ptr, long *len)
{
	unsigned char *cur = *ptr;
	long skip = sizeof(struct bts_action) +
			((struct bts_action *)cur)->size;
	unsigned char *next = cur + skip;

	if (((struct bts_action *)next)->type != ACTION_WAIT_EVENT) {
		pr_err("invalid action after change remote baud command");
		return;
	}

	/* warn user on not commenting these in firmware */
	pr_warn("skipping the wait event of change remote baud");
	*ptr = next;
	*len -= skip;
}
/**
 * download_firmware -
 * internal function which parses through the .bts firmware
 * script file, interpreting SEND, WAIT and DELAY actions only as of now.
 *
 * Reads the chip version, requests the matching TIInit_*.bts file and
 * replays its actions over the UART. Returns 0 on success or a negative
 * errno (-EIO, -EINVAL, -ETIMEDOUT, or a write error) on failure.
 */
static long download_firmware(struct kim_data_s *kim_gdata)
{
	long err = 0;
	long len = 0;
	unsigned char *ptr = NULL;
	unsigned char *action_ptr = NULL;
	/* was unsigned char[]: read_local_version() takes a char *, so use
	 * char to avoid an incompatible-pointer-type mismatch */
	char bts_scr_name[30] = { 0 };	/* 30 char long bts scr name? */
	int wr_room_space;
	int cmd_size;
	unsigned long timeout;

	err = read_local_version(kim_gdata, bts_scr_name);
	if (err != 0) {
		pr_err("kim: failed to read local ver");
		return err;
	}
	err =
		request_firmware(&kim_gdata->fw_entry, bts_scr_name,
				&kim_gdata->kim_pdev->dev);
	if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) ||
		     (kim_gdata->fw_entry->size == 0))) {
		pr_err(" request_firmware failed(errno %ld) for %s", err,
			   bts_scr_name);
		return -EINVAL;
	}
	ptr = (void *)kim_gdata->fw_entry->data;
	len = kim_gdata->fw_entry->size;
	/* bts_header to remove out magic number and
	 * version
	 */
	ptr += sizeof(struct bts_header);
	len -= sizeof(struct bts_header);

	while (len > 0 && ptr) {
		pr_debug(" action size %d, type %d ",
				((struct bts_action *)ptr)->size,
				((struct bts_action *)ptr)->type);

		switch (((struct bts_action *)ptr)->type) {
		case ACTION_SEND_COMMAND:	/* action send */
			pr_debug("S");
			action_ptr = &(((struct bts_action *)ptr)->data[0]);
			if (unlikely
			    (((struct hci_command *)action_ptr)->opcode ==
			     0xFF36)) {
				/* ignore remote change
				 * baud rate HCI VS command */
				pr_warn("change remote baud"
				    " rate command in firmware");
				skip_change_remote_baud(&ptr, &len);
				break;
			}
			/*
			 * Make sure we have enough free space in uart
			 * tx buffer to write current firmware command
			 */
			cmd_size = ((struct bts_action *)ptr)->size;
			timeout = jiffies + msecs_to_jiffies(CMD_WR_TIME);
			do {
				wr_room_space =
					st_get_uart_wr_room(kim_gdata->core_data);
				if (wr_room_space < 0) {
					pr_err("Unable to get free "
							"space info from uart tx buffer");
					release_firmware(kim_gdata->fw_entry);
					return wr_room_space;
				}
				mdelay(1); /* wait 1ms before checking room */
			} while ((wr_room_space < cmd_size) &&
					time_before(jiffies, timeout));

			/* Timeout happened ? */
			if (time_after_eq(jiffies, timeout)) {
				/* fixed duplicated word in the message below */
				pr_err("Timeout while waiting for free "
						"space in uart tx buffer");
				release_firmware(kim_gdata->fw_entry);
				return -ETIMEDOUT;
			}
			/* reinit completion before sending for the
			 * relevant wait
			 */
			INIT_COMPLETION(kim_gdata->kim_rcvd);

			/*
			 * Free space found in uart buffer, call st_int_write
			 * to send current firmware command to the uart tx
			 * buffer.
			 */
			err = st_int_write(kim_gdata->core_data,
			((struct bts_action_send *)action_ptr)->data,
					((struct bts_action *)ptr)->size);
			if (unlikely(err < 0)) {
				release_firmware(kim_gdata->fw_entry);
				return err;
			}
			/*
			 * Check number of bytes written to the uart tx buffer
			 * and requested command write size
			 */
			if (err != cmd_size) {
				pr_err("Number of bytes written to uart "
						"tx buffer are not matching with "
						"requested cmd write size");
				release_firmware(kim_gdata->fw_entry);
				return -EIO;
			}
			break;
		case ACTION_WAIT_EVENT:  /* wait */
			pr_debug("W");
			if (!wait_for_completion_timeout
					(&kim_gdata->kim_rcvd,
					 msecs_to_jiffies(CMD_RESP_TIME))) {
				pr_err("response timeout during fw download ");
				/* timed out */
				release_firmware(kim_gdata->fw_entry);
				return -ETIMEDOUT;
			}
			INIT_COMPLETION(kim_gdata->kim_rcvd);
			break;
		case ACTION_DELAY:	/* sleep */
			pr_info("sleep command in scr");
			action_ptr = &(((struct bts_action *)ptr)->data[0]);
			mdelay(((struct bts_action_delay *)action_ptr)->msec);
			break;
		}
		/* step to the next action record */
		len =
			len - (sizeof(struct bts_action) +
					((struct bts_action *)ptr)->size);
		ptr =
			ptr + sizeof(struct bts_action) +
			((struct bts_action *)ptr)->size;
	}
	/* fw download complete */
	release_firmware(kim_gdata->fw_entry);
	return 0;
}
/**********************************************************************/
/* functions called from ST core */
/* called from ST Core, when REG_IN_PROGRESS (registration in progress)
 * can be because of
 * 1. response to read local version
 * 2. during send/recv's of firmware download
 */
void st_kim_recv(void *disc_data, const unsigned char *data, long count)
{
	struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
	struct kim_data_s *kim_gdata = st_gdata->kim_data;

	/*
	 * Bytes matching an event (0x04) carrying the read_ver_cmd opcode
	 * bytes (0x01 0x10) are stashed verbatim for read_local_version();
	 * everything else feeds the firmware-download state machine.
	 */
	if (unlikely(data[4] == 0x01 && data[5] == 0x10 && data[0] == 0x04)) {
		memcpy(kim_gdata->resp_buffer, data, count);
		complete_all(&kim_gdata->kim_rcvd);
	} else {
		/* either completes or times out */
		kim_int_recv(kim_gdata, data, count);
	}
}
/* to signal completion of line discipline installation
 * called from ST Core, upon tty_open
 */
void st_kim_complete(void *kim_data)
{
	complete(&((struct kim_data_s *)kim_data)->ldisc_installed);
}
/**
 * st_kim_start - called from ST Core upon 1st registration
 * This involves toggling the chip enable gpio, reading
 * the firmware version from chip, forming the fw file name
 * based on the chip version, requesting the fw, parsing it
 * and perform download(send/recv).
 *
 * Retries the whole power-up/install/download sequence up to
 * POR_RETRY_COUNT times; returns the result of the last attempt.
 * NOTE(review): on a failed attempt 'err' is overwritten by
 * st_kim_stop()'s return value before retrying - confirm intended.
 */
long st_kim_start(void *kim_data)
{
	long err = 0;
	long retry = POR_RETRY_COUNT;
	struct ti_st_plat_data	*pdata;
	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;

	pr_info(" %s", __func__);
	pdata = kim_gdata->kim_pdev->dev.platform_data;

	do {
		/* platform specific enabling code here */
		if (pdata->chip_enable)
			pdata->chip_enable(kim_gdata);

		/* Configure BT nShutdown to HIGH state */
		gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
		mdelay(5);	/* FIXME: a proper toggle */
		gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
		mdelay(100);
		/* re-initialize the completion */
		INIT_COMPLETION(kim_gdata->ldisc_installed);
		/* send notification to UIM */
		kim_gdata->ldisc_install = 1;
		pr_info("ldisc_install = 1");
		sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
				NULL, "install");
		/* wait for ldisc to be installed */
		err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
				msecs_to_jiffies(LDISC_TIME));
		if (!err) {
			/* ldisc installation timeout,
			 * flush uart, power cycle BT_EN */
			pr_err("ldisc installation timeout");
			err = st_kim_stop(kim_gdata);
			continue;
		} else {
			/* ldisc installed now */
			pr_info("line discipline installed");
			err = download_firmware(kim_gdata);
			if (err != 0) {
				/* ldisc installed but fw download failed,
				 * flush uart & power cycle BT_EN */
				pr_err("download firmware failed");
				err = st_kim_stop(kim_gdata);
				continue;
			} else {	/* on success don't retry */
				break;
			}
		}
	} while (retry--);
	return err;
}
/**
 * st_kim_stop - stop communication with chip.
 * This can be called from ST Core/KIM, on the-
 * (a) last un-register when chip need not be powered there-after,
 * (b) upon failure to either install ldisc or download firmware.
 * The function is responsible to (a) notify UIM about un-installation,
 * (b) flush UART if the ldisc was installed.
 * (c) reset BT_EN - pull down nshutdown at the end.
 * (d) invoke platform's chip disabling routine.
 *
 * Returns -ETIMEDOUT if the UIM never removes the line discipline
 * within LDISC_TIME; otherwise the (positive) remaining-jiffies value
 * from wait_for_completion_timeout() - NOTE(review): callers treat any
 * return as "stopped"; confirm a 0/err contract isn't expected.
 */
long st_kim_stop(void *kim_data)
{
	long err = 0;
	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
	struct ti_st_plat_data	*pdata =
		kim_gdata->kim_pdev->dev.platform_data;
	struct tty_struct	*tty = kim_gdata->core_data->tty;

	INIT_COMPLETION(kim_gdata->ldisc_installed);

	if (tty) {	/* can be called before ldisc is installed */
		/* Flush any pending characters in the driver and discipline. */
		tty_ldisc_flush(tty);
		/* tty_driver_flush_buffer() already invokes the driver's
		 * flush_buffer op (with a NULL check); the former direct
		 * tty->ops->flush_buffer(tty) call here was a redundant
		 * duplicate and has been removed. */
		tty_driver_flush_buffer(tty);
	}

	/* send uninstall notification to UIM */
	pr_info("ldisc_install = 0");
	kim_gdata->ldisc_install = 0;
	sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, NULL, "install");

	/* wait for ldisc to be un-installed */
	err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
			msecs_to_jiffies(LDISC_TIME));
	if (!err) {		/* timeout */
		pr_err(" timed out waiting for ldisc to be un-installed");
		return -ETIMEDOUT;
	}

	/* By default configure BT nShutdown to LOW state */
	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
	mdelay(1);
	gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
	mdelay(1);
	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);

	/* platform specific disable */
	if (pdata->chip_disable)
		pdata->chip_disable(kim_gdata);
	return err;
}
/**********************************************************************/
/* functions called from subsystems */
/* called when debugfs entry is read from */
/* debugfs "version" read handler: print the cached chip version string. */
static int show_version(struct seq_file *s, void *unused)
{
	struct kim_data_s *kim = (struct kim_data_s *)s->private;

	seq_printf(s, "%04X %d.%d.%d\n", kim->version.full,
		   kim->version.chip, kim->version.maj_ver,
		   kim->version.min_ver);
	return 0;
}
/* debugfs "protocols" read handler: delegate to the ST core's lister. */
static int show_list(struct seq_file *s, void *unused)
{
	struct kim_data_s *kim = (struct kim_data_s *)s->private;

	kim_st_list_protocols(kim->core_data, s);
	return 0;
}
/* sysfs "install" read: 1 when UIM should install the line discipline. */
static ssize_t show_install(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kim_data_s *kim = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", kim->ldisc_install);
}
#ifdef DEBUG
/*
 * sysfs "dev_name" write (debug builds only): set the UART device name.
 *
 * FIX: the original strncpy(dev_name, buf, count) used the user-controlled
 * length, so a long write could overflow the fixed-size dev_name[] array
 * and/or leave it without a NUL terminator.  Bound the copy to the
 * destination size and always terminate.
 */
static ssize_t store_dev_name(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kim_data_s *kim_data = dev_get_drvdata(dev);
	size_t len = count;

	pr_debug("storing dev name >%s<", buf);
	if (len >= sizeof(kim_data->dev_name))
		len = sizeof(kim_data->dev_name) - 1;
	memcpy(kim_data->dev_name, buf, len);
	kim_data->dev_name[len] = '\0';
	pr_debug("stored dev name >%s<", kim_data->dev_name);
	return count;
}
/*
 * sysfs "baud_rate" write (debug builds only): set the UART baud rate.
 *
 * FIX: the sscanf() result was ignored, so non-numeric input silently kept
 * the stale value while still reporting success; reject it with -EINVAL.
 */
static ssize_t store_baud_rate(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct kim_data_s *kim_data = dev_get_drvdata(dev);

	pr_debug("storing baud rate >%s<", buf);
	if (sscanf(buf, "%ld", &kim_data->baud_rate) != 1)
		return -EINVAL;
	pr_debug("stored baud rate >%ld<", kim_data->baud_rate);
	return count;
}
#endif /* if DEBUG */
/* sysfs "dev_name" read: report the UART device name UIM should open. */
static ssize_t show_dev_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kim_data_s *kim = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", kim->dev_name);
}
/* sysfs "baud_rate" read: report the configured UART baud rate. */
static ssize_t show_baud_rate(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kim_data_s *kim = dev_get_drvdata(dev);

	return sprintf(buf, "%ld\n", kim->baud_rate);
}
/* sysfs "flow_cntrl" read: report whether UART flow control is wanted. */
static ssize_t show_flow_cntrl(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct kim_data_s *kim = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", kim->flow_cntrl);
}
/* structures specific for sysfs entries */
/*
 * NOTE(review): the handlers above have device_attribute-style signatures
 * but are forced into kobj_attribute slots via (void *) casts.  This
 * presumably works because the group is created on the platform device's
 * kobject, but it defeats type checking -- consider converting to
 * struct device_attribute / DEVICE_ATTR.  Verify before changing.
 */
static struct kobj_attribute ldisc_install =
__ATTR(install, 0444, (void *)show_install, NULL);
/* dev_name is writable only in DEBUG builds */
static struct kobj_attribute uart_dev_name =
#ifdef DEBUG	/* TODO: move this to debug-fs if possible */
__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
#else
__ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
#endif
/* baud_rate is writable only in DEBUG builds */
static struct kobj_attribute uart_baud_rate =
#ifdef DEBUG	/* TODO: move to debugfs */
__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
#else
__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
#endif
static struct kobj_attribute uart_flow_cntrl =
__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
/* attributes exported to the user-space UIM daemon */
static struct attribute *uim_attrs[] = {
	&ldisc_install.attr,
	&uart_dev_name.attr,
	&uart_baud_rate.attr,
	&uart_flow_cntrl.attr,
	NULL,
};
static struct attribute_group uim_attr_grp = {
	.attrs = uim_attrs,
};
/**
* st_kim_ref - reference the core's data
* This references the per-ST platform device in the arch/xx/
* board-xx.c file.
* This would enable multiple such platform devices to exist
* on a given platform
*/
void st_kim_ref(struct st_data_s **core_data, int id)
{
	struct platform_device *pdev = st_get_plat_device(id);
	struct kim_data_s *kim;

	/* default to "not found"; overwritten below on success */
	*core_data = NULL;
	if (!pdev)
		return;
	kim = dev_get_drvdata(&pdev->dev);
	*core_data = kim->core_data;
}
/* debugfs open for "version": bind show_version() to the seq_file. */
static int kim_version_open(struct inode *i, struct file *f)
{
	return single_open(f, show_version, i->i_private);
}
/* debugfs open for "protocols": bind show_list() to the seq_file. */
static int kim_list_open(struct inode *i, struct file *f)
{
	return single_open(f, show_list, i->i_private);
}
/* file_operations for the two read-only debugfs entries */
static const struct file_operations version_debugfs_fops = {
	/* version info */
	.open = kim_version_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static const struct file_operations list_debugfs_fops = {
	/* protocols info */
	.open = kim_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/**********************************************************************/
/* functions called from platform device driver subsystem
* need to have a relevant platform device entry in the platform's
* board-*.c file
*/
struct dentry *kim_debugfs_dir;
/*
 * kim_probe - platform probe: allocate per-device state, init the ST
 * core, claim the nShutdown GPIO and create the sysfs/debugfs entries.
 *
 * FIX: every failure path after the kzalloc() used to leak previously
 * acquired resources (kim_gdata, the ST core, the GPIO, the sysfs
 * group).  Converted to the standard goto-based unwind.
 */
static int kim_probe(struct platform_device *pdev)
{
	long status;
	struct kim_data_s *kim_gdata;
	struct ti_st_plat_data *pdata = pdev->dev.platform_data;

	if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
		/* multiple devices could exist */
		st_kim_devices[pdev->id] = pdev;
	} else {
		/* platform's sure about existence of 1 device */
		st_kim_devices[0] = pdev;
	}

	kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
	if (!kim_gdata) {
		pr_err("no mem to allocate");
		return -ENOMEM;
	}
	dev_set_drvdata(&pdev->dev, kim_gdata);

	status = st_core_init(&kim_gdata->core_data);
	if (status != 0) {
		pr_err(" ST core init failed");
		status = -EIO;
		goto err_free_gdata;
	}
	/* refer to itself */
	kim_gdata->core_data->kim_data = kim_gdata;

	/* Claim the chip enable nShutdown gpio from the system */
	kim_gdata->nshutdown = pdata->nshutdown_gpio;
	status = gpio_request(kim_gdata->nshutdown, "kim");
	if (unlikely(status)) {
		pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
		goto err_core_exit;
	}

	/* Configure nShutdown GPIO as output=0 */
	status = gpio_direction_output(kim_gdata->nshutdown, 0);
	if (unlikely(status)) {
		pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
		goto err_gpio_free;
	}

	/* get reference of pdev for request_firmware */
	kim_gdata->kim_pdev = pdev;
	init_completion(&kim_gdata->kim_rcvd);
	init_completion(&kim_gdata->ldisc_installed);

	status = sysfs_create_group(&pdev->dev.kobj, &uim_attr_grp);
	if (status) {
		pr_err("failed to create sysfs entries");
		goto err_gpio_free;
	}

	/* copying platform data */
	strncpy(kim_gdata->dev_name, pdata->dev_name, UART_DEV_NAME_LEN);
	kim_gdata->flow_cntrl = pdata->flow_cntrl;
	kim_gdata->baud_rate = pdata->baud_rate;
	pr_info("sysfs entries created\n");

	kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
	if (IS_ERR(kim_debugfs_dir)) {
		pr_err(" debugfs entries creation failed ");
		kim_debugfs_dir = NULL;
		status = -EIO;
		goto err_sysfs_remove;
	}
	debugfs_create_file("version", S_IRUGO, kim_debugfs_dir,
				kim_gdata, &version_debugfs_fops);
	debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
				kim_gdata, &list_debugfs_fops);
	pr_info(" debugfs entries created ");
	return 0;

err_sysfs_remove:
	sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
err_gpio_free:
	gpio_free(kim_gdata->nshutdown);
err_core_exit:
	st_core_exit(kim_gdata->core_data);
err_free_gdata:
	kfree(kim_gdata);
	return status;
}
/*
 * kim_remove - platform remove: release everything kim_probe() acquired.
 *
 * FIX: dropped the pointless "kim_gdata = NULL" after kfree() -- it only
 * cleared a local variable and had no effect.
 */
static int kim_remove(struct platform_device *pdev)
{
	/* free the GPIOs requested */
	struct ti_st_plat_data *pdata = pdev->dev.platform_data;
	struct kim_data_s *kim_gdata;

	kim_gdata = dev_get_drvdata(&pdev->dev);

	/* Free the Bluetooth/FM/GPIO
	 * nShutdown gpio from the system
	 */
	gpio_free(pdata->nshutdown_gpio);
	pr_info("nshutdown GPIO Freed");

	debugfs_remove_recursive(kim_debugfs_dir);
	sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
	pr_info("sysfs entries removed");

	kim_gdata->kim_pdev = NULL;
	st_core_exit(kim_gdata->core_data);

	kfree(kim_gdata);
	return 0;
}
/* Forward suspend to the platform hook, if one was provided. */
int kim_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct ti_st_plat_data *pdata = pdev->dev.platform_data;

	return pdata->suspend ? pdata->suspend(pdev, state) : -EOPNOTSUPP;
}
/* Forward resume to the platform hook, if one was provided. */
int kim_resume(struct platform_device *pdev)
{
	struct ti_st_plat_data *pdata = pdev->dev.platform_data;

	return pdata->resume ? pdata->resume(pdev) : -EOPNOTSUPP;
}
/**********************************************************************/
/* entry point for ST KIM module, called in from ST Core */
/* platform driver glue: probe/remove/suspend/resume defined above */
static struct platform_driver kim_platform_driver = {
	.probe = kim_probe,
	.remove = kim_remove,
	.suspend = kim_suspend,
	.resume = kim_resume,
	.driver = {
		.name = "kim",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(kim_platform_driver);
MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>");
MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips ");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DirtyUnicorns/android_kernel_samsung_manta | drivers/s390/char/sclp_sdias.c | 4838 | 7185 | /*
* Sclp "store data in absolut storage"
*
* Copyright IBM Corp. 2003,2007
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "sclp_sdias"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/sched.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "sclp.h"
#include "sclp_rw.h"
/* trace via the s390 debug feature registered in sclp_sdias_init() */
#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
/* retry/sleep parameters for sdias_sclp_send() */
#define SDIAS_RETRIES 300
#define SDIAS_SLEEP_TICKS 50
/* event qualifiers: copy data vs. query HSA size */
#define EQ_STORE_DATA 0x0
#define EQ_SIZE 0x1
#define DI_FCP_DUMP 0x0
/* absolute-storage address size flags */
#define ASA_SIZE_32 0x0
#define ASA_SIZE_64 0x1
/* event status codes returned by the SCLP */
#define EVSTATE_ALL_STORED 0x0
#define EVSTATE_NO_DATA 0x3
#define EVSTATE_PART_STORED 0x10
static struct debug_info *sdias_dbf;
/* registration record; receive_mask/receiver_fn filled in at init time */
static struct sclp_register sclp_sdias_register = {
	.send_mask = EVTYP_SDIAS_MASK,
};
/* SDIAS event buffer layout as defined by the SCLP interface */
struct sdias_evbuf {
	struct evbuf_header hdr;
	u8 event_qual;		/* EQ_STORE_DATA or EQ_SIZE */
	u8 data_id;		/* DI_FCP_DUMP */
	u64 reserved2;
	u32 event_id;
	u16 reserved3;
	u8 asa_size;		/* ASA_SIZE_32 / ASA_SIZE_64 */
	u8 event_status;	/* EVSTATE_* result */
	u32 reserved4;
	u32 blk_cnt;		/* number of 4K blocks */
	u64 asa;		/* absolute storage address of buffer */
	u32 reserved5;
	u32 fbn;		/* first block number */
	u32 reserved6;
	u32 lbn;		/* last block number */
	u16 reserved7;
	u16 dbs;
} __attribute__((packed));
struct sdias_sccb {
	struct sccb_header hdr;
	struct sdias_evbuf evbuf;
} __attribute__((packed));
/* single static SCCB, page aligned as the hardware requires;
 * serialized by sdias_mutex */
static struct sdias_sccb sccb __attribute__((aligned(4096)));
/* copy of the response event buffer (filled sync or via receiver_fn) */
static struct sdias_evbuf sdias_evbuf;
static DECLARE_COMPLETION(evbuf_accepted);
static DECLARE_COMPLETION(evbuf_done);
static DEFINE_MUTEX(sdias_mutex);
/*
 * Called by SCLP base when read event data has been completed (async mode only)
 *
 * Copies the (possibly shorter) event buffer into sdias_evbuf and wakes
 * the waiter in sdias_sclp_send().
 */
static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
{
	memcpy(&sdias_evbuf, evbuf,
	       min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
	complete(&evbuf_done);
	TRACE("sclp_sdias_receiver_fn done\n");
}
/*
 * Called by SCLP base when sdias event has been accepted
 * (request completion callback; wakes sdias_sclp_send()).
 */
static void sdias_callback(struct sclp_req *request, void *data)
{
	complete(&evbuf_accepted);
	TRACE("callback done\n");
}
/*
 * sdias_sclp_send - submit @req to the SCLP and wait for the response.
 *
 * Retries up to SDIAS_RETRIES times.  In synchronous mode the response
 * is taken from the static sccb; in asynchronous mode (receiver_fn set)
 * it arrives later via sclp_sdias_receiver_fn().  Returns 0 on success,
 * -EIO when all retries are exhausted.
 */
static int sdias_sclp_send(struct sclp_req *req)
{
	int retries;
	int rc;
	for (retries = SDIAS_RETRIES; retries; retries--) {
		TRACE("add request\n");
		rc = sclp_add_request(req);
		if (rc) {
			/* not initiated, wait some time and retry */
			set_current_state(TASK_INTERRUPTIBLE);
			TRACE("add request failed: rc = %i\n",rc);
			schedule_timeout(SDIAS_SLEEP_TICKS);
			continue;
		}
		/* initiated, wait for completion of service call */
		wait_for_completion(&evbuf_accepted);
		if (req->status == SCLP_REQ_FAILED) {
			TRACE("sclp request failed\n");
			continue;
		}
		/* if not accepted, retry
		 * (0x80 is presumably the "event buffer processed" flag --
		 * see the SCLP event buffer header definition) */
		if (!(sccb.evbuf.hdr.flags & 0x80)) {
			TRACE("sclp request failed: flags=%x\n",
			      sccb.evbuf.hdr.flags);
			continue;
		}
		/*
		 * for the sync interface the response is in the initial sccb
		 */
		if (!sclp_sdias_register.receiver_fn) {
			memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
			TRACE("sync request done\n");
			return 0;
		}
		/* otherwise we wait for completion */
		wait_for_completion(&evbuf_done);
		TRACE("request done\n");
		return 0;
	}
	return -EIO;
}
/*
 * Get number of blocks (4K) available in the HSA
 *
 * Builds an EQ_SIZE query in the static sccb (under sdias_mutex),
 * sends it and returns the reported block count, or a negative error.
 */
int sclp_sdias_blk_count(void)
{
	struct sclp_req request;
	int rc;
	mutex_lock(&sdias_mutex);
	memset(&sccb, 0, sizeof(sccb));
	memset(&request, 0, sizeof(request));
	sccb.hdr.length = sizeof(sccb);
	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb.evbuf.hdr.type = EVTYP_SDIAS;
	sccb.evbuf.event_qual = EQ_SIZE;
	sccb.evbuf.data_id = DI_FCP_DUMP;
	sccb.evbuf.event_id = 4712;	/* arbitrary fixed id for this driver */
	sccb.evbuf.dbs = 1;
	request.sccb = &sccb;
	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request.status = SCLP_REQ_FILLED;
	request.callback = sdias_callback;
	rc = sdias_sclp_send(&request);
	if (rc) {
		pr_err("sclp_send failed for get_nr_blocks\n");
		goto out;
	}
	/* 0x0020 == command complete without error */
	if (sccb.hdr.response_code != 0x0020) {
		TRACE("send failed: %x\n", sccb.hdr.response_code);
		rc = -EIO;
		goto out;
	}
	switch (sdias_evbuf.event_status) {
	case 0:
		rc = sdias_evbuf.blk_cnt;
		break;
	default:
		pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
		rc = -EIO;
		goto out;
	}
	TRACE("%i blocks\n", rc);
out:
	mutex_unlock(&sdias_mutex);
	return rc;
}
/*
 * Copy from HSA to absolute storage (not reentrant):
 *
 * @dest     : Address of buffer where data should be copied
 * @start_blk: Start Block (beginning with 1)
 * @nr_blks  : Number of 4K blocks to copy
 *
 * Return Value: 0 : Requested 'number' of blocks of data copied
 *              <0: ERROR - negative event status
 */
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
	struct sclp_req request;
	int rc;
	mutex_lock(&sdias_mutex);
	memset(&sccb, 0, sizeof(sccb));
	memset(&request, 0, sizeof(request));
	sccb.hdr.length = sizeof(sccb);
	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb.evbuf.hdr.type = EVTYP_SDIAS;
	sccb.evbuf.hdr.flags = 0;
	sccb.evbuf.event_qual = EQ_STORE_DATA;
	sccb.evbuf.data_id = DI_FCP_DUMP;
	sccb.evbuf.event_id = 4712;	/* same fixed id as the size query */
	/* tell the SCLP whether 'asa' is a 32- or 64-bit address */
#ifdef __s390x__
	sccb.evbuf.asa_size = ASA_SIZE_64;
#else
	sccb.evbuf.asa_size = ASA_SIZE_32;
#endif
	sccb.evbuf.event_status = 0;
	sccb.evbuf.blk_cnt = nr_blks;
	sccb.evbuf.asa = (unsigned long)dest;
	sccb.evbuf.fbn = start_blk;
	sccb.evbuf.lbn = 0;
	sccb.evbuf.dbs = 1;
	request.sccb = &sccb;
	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request.status = SCLP_REQ_FILLED;
	request.callback = sdias_callback;
	rc = sdias_sclp_send(&request);
	if (rc) {
		pr_err("sclp_send failed: %x\n", rc);
		goto out;
	}
	/* 0x0020 == command complete without error */
	if (sccb.hdr.response_code != 0x0020) {
		TRACE("copy failed: %x\n", sccb.hdr.response_code);
		rc = -EIO;
		goto out;
	}
	switch (sdias_evbuf.event_status) {
	case EVSTATE_ALL_STORED:
		TRACE("all stored\n");
		/* fall through - presumably intentional (also traces the
		 * block count); confirm before adding a break */
	case EVSTATE_PART_STORED:
		TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
		break;
	case EVSTATE_NO_DATA:
		TRACE("no data\n");
		/* fall through */
	default:
		pr_err("Error from SCLP while copying hsa. "
		       "Event status = %x\n",
		       sdias_evbuf.event_status);
		rc = -EIO;
	}
out:
	mutex_unlock(&sdias_mutex);
	return rc;
}
/* Register with the SCLP and verify the HSA is actually usable. */
static int __init sclp_sdias_register_check(void)
{
	int rc = sclp_register(&sclp_sdias_register);

	if (rc)
		return rc;
	/* a zero block count means this mode cannot deliver dump data */
	if (sclp_sdias_blk_count() == 0) {
		sclp_unregister(&sclp_sdias_register);
		return -ENODEV;
	}
	return 0;
}
/* Try the synchronous SDIAS interface (response read from the sccb). */
static int __init sclp_sdias_init_sync(void)
{
	TRACE("Try synchronous mode\n");
	sclp_sdias_register.receiver_fn = NULL;
	sclp_sdias_register.receive_mask = 0;
	return sclp_sdias_register_check();
}
/* Try the asynchronous SDIAS interface (response via receiver_fn). */
static int __init sclp_sdias_init_async(void)
{
	TRACE("Try asynchronous mode\n");
	sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
	sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
	return sclp_sdias_register_check();
}
/*
 * Module init: only active for FCP dump IPLs.  Sets up the debug
 * feature, then tries the synchronous interface first and falls back
 * to the asynchronous one.
 */
int __init sclp_sdias_init(void)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return 0;
	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
	debug_register_view(sdias_dbf, &debug_sprintf_view);
	debug_set_level(sdias_dbf, 6);
	/* short-circuit: async is only attempted when sync mode fails */
	if (sclp_sdias_init_sync() != 0 && sclp_sdias_init_async() != 0) {
		TRACE("init failed\n");
		return -ENODEV;
	}
	TRACE("init done\n");
	return 0;
}
/* Module exit: tear down the debug feature and SCLP registration. */
void __exit sclp_sdias_exit(void)
{
	debug_unregister(sdias_dbf);
	sclp_unregister(&sclp_sdias_register);
}
| gpl-2.0 |
donkeykang/donkeyk | drivers/mtd/maps/dc21285.c | 4838 | 5533 | /*
* MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip)
*
* (C) 2000 Nicolas Pitre <nico@fluxnic.net>
*
* This code is GPL
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/hardware/dec21285.h>
#include <asm/mach-types.h>
/* MTD device created by init_dc21285(), torn down in cleanup_dc21285() */
static struct mtd_info *dc21285_mtd;
#ifdef CONFIG_ARCH_NETWINDER
/*
 * This is really ugly, but it seams to be the only
 * realiable way to do it, as the cpld state machine
 * is unpredictible. So we have a 25us penalty per
 * write access.
 */
static void nw_en_write(void)
{
	unsigned long flags;
	/*
	 * we want to write a bit pattern XXX1 to Xilinx to enable
	 * the write gate, which will be open for about the next 2ms.
	 */
	spin_lock_irqsave(&nw_gpio_lock, flags);
	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
	spin_unlock_irqrestore(&nw_gpio_lock, flags);
	/*
	 * let the ISA bus to catch on...
	 */
	udelay(25);
}
#else
/* no write-enable dance needed on other boards */
#define nw_en_write() do { } while (0)
#endif
/* 8-bit read through the memory-mapped flash window. */
static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
{
	map_word w;

	w.x[0] = *(uint8_t *)(map->virt + ofs);
	return w;
}
/* 16-bit read through the memory-mapped flash window. */
static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
{
	map_word w;

	w.x[0] = *(uint16_t *)(map->virt + ofs);
	return w;
}
/* 32-bit read through the memory-mapped flash window. */
static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
{
	map_word w;

	w.x[0] = *(uint32_t *)(map->virt + ofs);
	return w;
}
/* Bulk read: flash is directly mapped, so a plain memcpy suffices. */
static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	memcpy(to, (void*)(map->virt + from), len);
}
/*
 * 8-bit flash write.  CSR_ROMWRITEREG is loaded with the low address
 * bits (presumably selecting the byte lane -- see the 21285 datasheet)
 * before the store to the word-aligned address.  Order matters.
 */
static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();	/* open the CPLD write gate first */
	*CSR_ROMWRITEREG = adr & 3;
	adr &= ~3;
	*(uint8_t*)(map->virt + adr) = d.x[0];
}
/*
 * 16-bit flash write.  Same CSR_ROMWRITEREG lane-select sequence as the
 * 8-bit variant; the store goes to the word-aligned address.
 */
static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();	/* open the CPLD write gate first */
	*CSR_ROMWRITEREG = adr & 3;
	adr &= ~3;
	*(uint16_t*)(map->virt + adr) = d.x[0];
}
/* 32-bit flash write: full-word cycles need no CSR_ROMWRITEREG setup. */
static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();	/* open the CPLD write gate first */
	*(uint32_t*)(map->virt + adr) = d.x[0];
}
/* Bulk write, 32-bit bus: push the buffer one word at a time through
 * dc21285_write32() (which handles the Netwinder write enable). */
static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	const uint32_t *src = from;

	for (; len > 0; len -= 4, to += 4) {
		map_word d;

		d.x[0] = *src++;
		dc21285_write32(map, d, to);
	}
}
/* Bulk write, 16-bit bus: push the buffer one halfword at a time
 * through dc21285_write16(). */
static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	const uint16_t *src = from;

	for (; len > 0; len -= 2, to += 2) {
		map_word d;

		d.x[0] = *src++;
		dc21285_write16(map, d, to);
	}
}
/*
 * Bulk write, 8-bit bus.
 *
 * FIX: the original body had no loop -- it wrote exactly one byte and
 * then incremented/decremented its locals to no effect, so any copy_to
 * of more than one byte silently lost data.  Loop like the 16/32-bit
 * variants so the map->copy_to contract (copy @len bytes) holds.
 */
static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	const uint8_t *src = from;

	while (len > 0) {
		map_word d;

		d.x[0] = *src++;
		dc21285_write8(map, d, to);
		to++;
		len--;
	}
}
/* map_info template; read/write/copy_to ops are filled in by
 * init_dc21285() once the bank width is known */
static struct map_info dc21285_map = {
	.name = "DC21285 flash",
	.phys = NO_XIP,
	.size = 16*1024*1024,
	.copy_from = dc21285_copy_from,
};
/* Partition stuff */
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
/*
 * Module init: read the ROM width from CSR_SA110_CNTL, wire up the
 * matching accessors, map the flash window, probe for a chip and
 * register the MTD (with RedBoot/cmdline partition parsing).
 */
static int __init init_dc21285(void)
{
	/* Determine bankwidth */
	switch (*CSR_SA110_CNTL & (3<<14)) {
	case SA110_CNTL_ROMWIDTH_8:
		dc21285_map.bankwidth = 1;
		dc21285_map.read = dc21285_read8;
		dc21285_map.write = dc21285_write8;
		dc21285_map.copy_to = dc21285_copy_to_8;
		break;
	case SA110_CNTL_ROMWIDTH_16:
		dc21285_map.bankwidth = 2;
		dc21285_map.read = dc21285_read16;
		dc21285_map.write = dc21285_write16;
		dc21285_map.copy_to = dc21285_copy_to_16;
		break;
	case SA110_CNTL_ROMWIDTH_32:
		dc21285_map.bankwidth = 4;
		dc21285_map.read = dc21285_read32;
		dc21285_map.write = dc21285_write32;
		dc21285_map.copy_to = dc21285_copy_to_32;
		break;
	default:
		printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
		return -ENXIO;
	}
	printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
		dc21285_map.bankwidth*8);

	/* Let's map the flash area */
	dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
	if (!dc21285_map.virt) {
		printk("Failed to ioremap\n");
		return -EIO;
	}
	/* EBSA285 boards carry CFI flash; others (Netwinder) use JEDEC */
	if (machine_is_ebsa285()) {
		dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
	} else {
		dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
	}
	if (!dc21285_mtd) {
		iounmap(dc21285_map.virt);
		return -ENXIO;
	}
	dc21285_mtd->owner = THIS_MODULE;
	mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
	if(machine_is_ebsa285()) {
		/*
		 * Flash timing is determined with bits 19-16 of the
		 * CSR_SA110_CNTL. The value is the number of wait cycles, or
		 * 0 for 16 cycles (the default). Cycles are 20 ns.
		 * Here we use 7 for 140 ns flash chips.
		 */
		/* access time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
		/* burst time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
		/* tristate time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
	}
	return 0;
}
/* Module exit: unregister the MTD, destroy the map, unmap the window. */
static void __exit cleanup_dc21285(void)
{
	mtd_device_unregister(dc21285_mtd);
	map_destroy(dc21285_mtd);
	iounmap(dc21285_map.virt);
}
module_init(init_dc21285);
module_exit(cleanup_dc21285);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
| gpl-2.0 |
eugenesan/android_kernel_lge_hammerhead | arch/sh/boards/mach-hp6xx/pm.c | 9190 | 3198 | /*
* hp6x0 Power Management Routines
*
* Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*/
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/hd64461.h>
#include <asm/bl_bit.h>
#include <mach/hp6xx.h>
#include <cpu/dac.h>
#include <asm/freq.h>
#include <asm/watchdog.h>
/* offset of the interrupt vector inside the VBR page */
#define INTR_OFFSET	0x600
/* SH3 standby control registers and bits (used by pm_enter below) */
#define STBCR		0xffffff82
#define STBCR2		0xffffff88
#define STBCR_STBY	0x80
#define STBCR_MSTP2	0x04
/* memory controller: mode register and refresh counter */
#define MCR		0xffffff68
#define RTCNT		0xffffff70
#define MCR_RMODE	2
#define MCR_RFSH	4
/* wakeup trampoline copied into the temporary vector page (asm code,
 * defined elsewhere) */
extern u8 wakeup_start;
extern u8 wakeup_end;
/*
 * pm_enter - low-level suspend sequence.
 *
 * Arms the watchdog, disables PLL1, enables standby and DRAM
 * self-refresh, installs a temporary vector page containing the wakeup
 * trampoline, sleeps, then restores everything in reverse order.
 * Statement order is hardware-mandated; do not reorder.
 */
static void pm_enter(void)
{
	u8 stbcr, csr;
	u16 frqcr, mcr;
	u32 vbr_new, vbr_old;
	set_bl_bit();
	/* set wdt */
	csr = sh_wdt_read_csr();
	csr &= ~WTCSR_TME;
	csr |= WTCSR_CKS_4096;
	sh_wdt_write_csr(csr);
	csr = sh_wdt_read_csr();
	sh_wdt_write_cnt(0);
	/* disable PLL1 */
	frqcr = __raw_readw(FRQCR);
	frqcr &= ~(FRQCR_PLLEN | FRQCR_PSTBY);
	__raw_writew(frqcr, FRQCR);
	/* enable standby */
	stbcr = __raw_readb(STBCR);
	__raw_writeb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR);
	/* set self-refresh */
	mcr = __raw_readw(MCR);
	__raw_writew(mcr & ~MCR_RFSH, MCR);
	/* set interrupt handler: copy the wakeup trampoline into a fresh
	 * page and point VBR at it for the duration of the sleep.
	 * NOTE(review): the get_zeroed_page() result is not checked --
	 * an allocation failure here would fault; confirm acceptable. */
	asm volatile("stc vbr, %0" : "=r" (vbr_old));
	vbr_new = get_zeroed_page(GFP_ATOMIC);
	udelay(50);
	memcpy((void*)(vbr_new + INTR_OFFSET),
	       &wakeup_start, &wakeup_end - &wakeup_start);
	asm volatile("ldc %0, vbr" : : "r" (vbr_new));
	__raw_writew(0, RTCNT);
	__raw_writew(mcr | MCR_RFSH | MCR_RMODE, MCR);
	cpu_sleep();
	/* back from sleep: restore the original vector base */
	asm volatile("ldc %0, vbr" : : "r" (vbr_old));
	free_page(vbr_new);
	/* enable PLL1 (power it up, wait for lock, then enable) */
	frqcr = __raw_readw(FRQCR);
	frqcr |= FRQCR_PSTBY;
	__raw_writew(frqcr, FRQCR);
	udelay(50);
	frqcr |= FRQCR_PLLEN;
	__raw_writew(frqcr, FRQCR);
	__raw_writeb(stbcr, STBCR);
	clear_bl_bit();
}
/*
 * hp6x0_pm_enter - board-level suspend entry.
 *
 * Powers down the HD64461 companion chip (when configured), saves and
 * masks the standby control registers, calls pm_enter() for the actual
 * sleep, then restores everything on wakeup.
 */
static int hp6x0_pm_enter(suspend_state_t state)
{
	u8 stbcr, stbcr2;
#ifdef CONFIG_HD64461_ENABLER
	u8 scr;
	u16 hd64461_stbcr;
#endif
#ifdef CONFIG_HD64461_ENABLER
	/* mask PCMCIA card interrupts and cut VCC on slot 1 */
	outb(0, HD64461_PCC1CSCIER);
	scr = inb(HD64461_PCC1SCR);
	scr |= HD64461_PCCSCR_VCC1;
	outb(scr, HD64461_PCC1SCR);
	hd64461_stbcr = inw(HD64461_STBCR);
	hd64461_stbcr |= HD64461_STBCR_SPC1ST;
	outw(hd64461_stbcr, HD64461_STBCR);
#endif
	__raw_writeb(0x1f, DACR);
	/* save standby registers, then force low-power settings */
	stbcr = __raw_readb(STBCR);
	__raw_writeb(0x01, STBCR);
	stbcr2 = __raw_readb(STBCR2);
	__raw_writeb(0x7f , STBCR2);
	outw(0xf07f, HD64461_SCPUCR);
	pm_enter();
	/* wakeup: undo the register changes in reverse order */
	outw(0, HD64461_SCPUCR);
	__raw_writeb(stbcr, STBCR);
	__raw_writeb(stbcr2, STBCR2);
#ifdef CONFIG_HD64461_ENABLER
	hd64461_stbcr = inw(HD64461_STBCR);
	hd64461_stbcr &= ~HD64461_STBCR_SPC1ST;
	outw(hd64461_stbcr, HD64461_STBCR);
	/* re-enable card status change interrupts */
	outb(0x4c, HD64461_PCC1CSCIER);
	outb(0x00, HD64461_PCC1CSCR);
#endif
	return 0;
}
/* suspend_ops: only suspend-to-mem is supported on this board */
static const struct platform_suspend_ops hp6x0_pm_ops = {
	.enter		= hp6x0_pm_enter,
	.valid		= suspend_valid_only_mem,
};
/* register the board's suspend operations with the PM core */
static int __init hp6x0_pm_init(void)
{
	suspend_set_ops(&hp6x0_pm_ops);
	return 0;
}
late_initcall(hp6x0_pm_init);
| gpl-2.0 |
ubports/android_kernel_oneplus_one | sound/pci/echoaudio/layla24_dsp.c | 12518 | 10447 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
/* forward declarations for helpers defined later in this file /
 * in the shared echoaudio core */
static int write_control_reg(struct echoaudio *chip, u32 value, char force);
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int set_digital_mode(struct echoaudio *chip, u8 mode);
static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic);
static int check_asic_status(struct echoaudio *chip);
/*
 * init_hw - one-time hardware bring-up for the Layla24.
 *
 * Validates the subdevice id, initializes the DSP communication page,
 * records the card's capabilities (clocks, digital modes), loads the
 * firmware and sets the initial line levels.  Returns 0 or a negative
 * error code.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Layla24\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != LAYLA24))
		return -ENODEV;

	if ((err = init_dsp_comm_page(chip))) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;
	chip->has_midi = TRUE;
	chip->dsp_code_to_load = FW_LAYLA24_DSP;
	/* supported input clock sources and digital I/O modes */
	chip->input_clock_types =
		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
	chip->digital_modes =
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA |
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
		ECHOCAPS_HAS_DIGITAL_MODE_ADAT;

	if ((err = load_firmware(chip)) < 0)
		return err;
	chip->bad_board = FALSE;

	if ((err = init_line_levels(chip)) < 0)
		return err;
	DE_INIT(("init_hw done\n"));
	return err;
}
/* Default mixer state: consumer S/PDIF on the RCA connector with
 * automatic muting of the digital input. */
static int set_mixer_defaults(struct echoaudio *chip)
{
	chip->professional_spdif = FALSE;
	chip->digital_in_automute = TRUE;
	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
	return init_line_levels(chip);
}
/* Map the DSP's clock-detect status word to the generic driver's
 * ECHO_CLOCK_BIT_* mask; the internal clock is always available. */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	u32 dsp_clocks = le32_to_cpu(chip->comm_page->status_clocks);
	u32 bits = ECHO_CLOCK_BIT_INTERNAL;

	if (dsp_clocks & GML_CLOCK_DETECT_BIT_SPDIF)
		bits |= ECHO_CLOCK_BIT_SPDIF;
	if (dsp_clocks & GML_CLOCK_DETECT_BIT_ADAT)
		bits |= ECHO_CLOCK_BIT_ADAT;
	if (dsp_clocks & GML_CLOCK_DETECT_BIT_WORD)
		bits |= ECHO_CLOCK_BIT_WORD;
	return bits;
}
/* Layla24 has an ASIC on the PCI card and another ASIC in the external box;
   both need to be loaded. */
/*
 * NOTE(review): returns 1 when the ASICs are already loaded, 0 on
 * success and a negative error otherwise -- callers presumably only
 * test for < 0; confirm before normalizing the return values.
 */
static int load_asic(struct echoaudio *chip)
{
	int err;

	if (chip->asic_loaded)
		return 1;

	DE_INIT(("load_asic\n"));

	/* Give the DSP a few milliseconds to settle down */
	mdelay(10);

	/* Load the ASIC for the PCI card */
	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_PCI_CARD_ASIC,
				FW_LAYLA24_1_ASIC);
	if (err < 0)
		return err;

	chip->asic_code = FW_LAYLA24_2S_ASIC;

	/* Now give the new ASIC a little time to set up */
	mdelay(10);
	/* Do the external one */
	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
				FW_LAYLA24_2S_ASIC);
	if (err < 0)
		return FALSE;

	/* Now give the external ASIC a little time to set up */
	mdelay(10);

	/* See if it worked */
	err = check_asic_status(chip);

	/* Set up the control register if the load succeeded -
	   48 kHz, internal clock, S/PDIF RCA mode */
	if (!err)
		err = write_control_reg(chip, GML_CONVERTER_ENABLE | GML_48KHZ,
					TRUE);
	DE_INIT(("load_asic() done\n"));
	return err;
}
/*
 * set_sample_rate - program the card for @rate.
 *
 * Takes effect only on the internal clock; on an external clock the
 * rate is merely cached.  Standard rates map to fixed clock bits in the
 * control register; any other rate uses Layla24's "continuous
 * frequency" divider register.  Returns 0 or a negative error.
 */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	u32 control_reg, clock, base_rate;

	/* ADAT mode cannot run at double-speed rates */
	if (snd_BUG_ON(rate >= 50000 &&
		       chip->digital_mode == DIGITAL_MODE_ADAT))
		return -EINVAL;

	/* Only set the clock for internal mode. */
	if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
		DE_ACT(("set_sample_rate: Cannot set sample rate - "
			"clock not set to CLK_CLOCKININTERNAL\n"));
		/* Save the rate anyhow */
		chip->comm_page->sample_rate = cpu_to_le32(rate);
		chip->sample_rate = rate;
		return 0;
	}

	/* Get the control register & clear the appropriate bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_CLOCK_CLEAR_MASK & GML_SPDIF_RATE_CLEAR_MASK;

	clock = 0;
	switch (rate) {
	case 96000:
		clock = GML_96KHZ;
		break;
	case 88200:
		clock = GML_88KHZ;
		break;
	case 48000:
		clock = GML_48KHZ | GML_SPDIF_SAMPLE_RATE1;
		break;
	case 44100:
		clock = GML_44KHZ;
		/* Professional mode */
		if (control_reg & GML_SPDIF_PRO_MODE)
			clock |= GML_SPDIF_SAMPLE_RATE0;
		break;
	case 32000:
		clock = GML_32KHZ | GML_SPDIF_SAMPLE_RATE0 |
			GML_SPDIF_SAMPLE_RATE1;
		break;
	case 22050:
		clock = GML_22KHZ;
		break;
	case 16000:
		clock = GML_16KHZ;
		break;
	case 11025:
		clock = GML_11KHZ;
		break;
	case 8000:
		clock = GML_8KHZ;
		break;
	default:
		/* If this is a non-standard rate, then the driver needs to
		   use Layla24's special "continuous frequency" mode */
		clock = LAYLA24_CONTINUOUS_CLOCK;
		/* double-speed mode: the divider runs at half the rate */
		if (rate > 50000) {
			base_rate = rate >> 1;
			control_reg |= GML_DOUBLE_SPEED_MODE;
		} else {
			base_rate = rate;
		}
		if (base_rate < 25000)
			base_rate = 25000;
		if (wait_handshake(chip))
			return -EIO;
		/* the DSP latches the divider via the vector command below,
		   before sample_rate is overwritten with the nominal rate */
		chip->comm_page->sample_rate =
			cpu_to_le32(LAYLA24_MAGIC_NUMBER / base_rate - 2);
		clear_handshake(chip);
		send_vector(chip, DSP_VC_SET_LAYLA24_FREQUENCY_REG);
	}

	control_reg |= clock;
	chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP ? */
	chip->sample_rate = rate;
	DE_ACT(("set_sample_rate: %d clock %d\n", rate, control_reg));

	return write_control_reg(chip, control_reg, FALSE);
}
/*
 * set_input_clock - select the clock source (internal/SPDIF/word/ADAT).
 *
 * Rejects combinations that conflict with the current digital mode
 * (-EAGAIN lets the caller switch modes first).  For the internal
 * clock it simply reprograms the current sample rate.
 */
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
	u32 control_reg, clocks_from_dsp;

	/* Mask off the clock select bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register) &
		GML_CLOCK_CLEAR_MASK;
	clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);

	/* Pick the new clock */
	switch (clock) {
	case ECHO_CLOCK_INTERNAL:
		DE_ACT(("Set Layla24 clock to INTERNAL\n"));
		chip->input_clock = ECHO_CLOCK_INTERNAL;
		return set_sample_rate(chip, chip->sample_rate);
	case ECHO_CLOCK_SPDIF:
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EAGAIN;
		control_reg |= GML_SPDIF_CLOCK;
		/* Layla24 doesn't support 96KHz S/PDIF */
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		DE_ACT(("Set Layla24 clock to SPDIF\n"));
		break;
	case ECHO_CLOCK_WORD:
		control_reg |= GML_WORD_CLOCK;
		/* follow the detected word-clock speed */
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		DE_ACT(("Set Layla24 clock to WORD\n"));
		break;
	case ECHO_CLOCK_ADAT:
		if (chip->digital_mode != DIGITAL_MODE_ADAT)
			return -EAGAIN;
		control_reg |= GML_ADAT_CLOCK;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		DE_ACT(("Set Layla24 clock to ADAT\n"));
		break;
	default:
		DE_ACT(("Input clock 0x%x not supported for Layla24\n", clock));
		return -EINVAL;
	}

	chip->input_clock = clock;
	return write_control_reg(chip, control_reg, TRUE);
}
/* Depending on what digital mode you want, Layla24 needs different ASICs
   loaded.  This function checks the ASIC needed for the new mode and sees
   if it matches the one already loaded. */
/*
 * The monitor gains are saved, muted during the ASIC load, and restored
 * afterwards (also restored on load failure).  Returns 0, -ENOMEM or -EIO.
 */
static int switch_asic(struct echoaudio *chip, short asic)
{
	s8 *monitors;

	/* Check to see if this is already loaded */
	if (asic != chip->asic_code) {
		/* snapshot the monitor levels before muting them */
		monitors = kmemdup(chip->comm_page->monitors,
					MONITOR_ARRAY_SIZE, GFP_KERNEL);
		if (! monitors)
			return -ENOMEM;

		memset(chip->comm_page->monitors, ECHOGAIN_MUTED,
		       MONITOR_ARRAY_SIZE);

		/* Load the desired ASIC */
		if (load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
				      asic) < 0) {
			/* restore the saved levels even on failure */
			memcpy(chip->comm_page->monitors, monitors,
			       MONITOR_ARRAY_SIZE);
			kfree(monitors);
			return -EIO;
		}
		chip->asic_code = asic;
		memcpy(chip->comm_page->monitors, monitors, MONITOR_ARRAY_SIZE);
		kfree(monitors);
	}

	return 0;
}
/*
 * Switch the Layla24 between S/PDIF (optical/RCA) and ADAT digital
 * modes, loading the matching external ASIC.  If the current input
 * clock is unusable in the new mode, the card first falls back to the
 * internal clock at 48 kHz.  Returns a negative error code on
 * failure; otherwise returns incompatible_clock (TRUE tells the
 * caller that the clock source was changed).
 */
static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
{
	u32 control_reg;
	int err, incompatible_clock;
	short asic;

	/* Set clock to "internal" if it's not compatible with the new mode */
	incompatible_clock = FALSE;
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
	case DIGITAL_MODE_SPDIF_RCA:
		if (chip->input_clock == ECHO_CLOCK_ADAT)
			incompatible_clock = TRUE;
		asic = FW_LAYLA24_2S_ASIC;
		break;
	case DIGITAL_MODE_ADAT:
		if (chip->input_clock == ECHO_CLOCK_SPDIF)
			incompatible_clock = TRUE;
		asic = FW_LAYLA24_2A_ASIC;
		break;
	default:
		DE_ACT(("Digital mode not supported: %d\n", mode));
		return -EINVAL;
	}

	if (incompatible_clock) {	/* Switch to 48KHz, internal */
		chip->sample_rate = 48000;
		spin_lock_irq(&chip->lock);
		set_input_clock(chip, ECHO_CLOCK_INTERNAL);
		spin_unlock_irq(&chip->lock);
	}

	/* switch_asic() can sleep */
	if (switch_asic(chip, asic) < 0)
		return -EIO;

	spin_lock_irq(&chip->lock);

	/* Tweak the control register */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_DIGITAL_MODE_CLEAR_MASK;

	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
		control_reg |= GML_SPDIF_OPTICAL_MODE;
		break;
	case DIGITAL_MODE_SPDIF_RCA:
		/* GML_SPDIF_OPTICAL_MODE bit cleared */
		break;
	case DIGITAL_MODE_ADAT:
		control_reg |= GML_ADAT_MODE;
		/* ADAT runs at single speed; clear the double-speed bit */
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	}

	err = write_control_reg(chip, control_reg, TRUE);
	spin_unlock_irq(&chip->lock);
	if (err < 0)
		return err;
	chip->digital_mode = mode;

	DE_ACT(("set_digital_mode to %d\n", mode));
	return incompatible_clock;
}
| gpl-2.0 |
erasmux/pyramid-gb-kernel | sound/pci/echoaudio/mona_dsp.c | 12518 | 11013 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
/* Forward declarations for routines defined later in this file */
static int write_control_reg(struct echoaudio *chip, u32 value, char force);
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int set_digital_mode(struct echoaudio *chip, u8 mode);
static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic);
static int check_asic_status(struct echoaudio *chip);
/* One-time hardware setup for the Mona: sanity-check the subdevice id,
   initialize the DSP communication page and download the DSP firmware
   matching the on-board DSP ('301 or '361 flavor). */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Mona\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != MONA))
		return -ENODEV;

	err = init_dsp_comm_page(chip);
	if (err) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;
	chip->input_clock_types =
		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
	chip->digital_modes =
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_RCA |
		ECHOCAPS_HAS_DIGITAL_MODE_SPDIF_OPTICAL |
		ECHOCAPS_HAS_DIGITAL_MODE_ADAT;

	/* Mona comes in both '301 and '361 flavors */
	chip->dsp_code_to_load = (chip->device_id == DEVICE_ID_56361) ?
		FW_MONA_361_DSP : FW_MONA_301_DSP;

	err = load_firmware(chip);
	if (err < 0)
		return err;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return err;
}
/* Default mixer state: S/PDIF on the RCA connectors, consumer-format
   S/PDIF, automatic muting of the digital input enabled. */
static int set_mixer_defaults(struct echoaudio *chip)
{
	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
	chip->professional_spdif = FALSE;
	chip->digital_in_automute = TRUE;
	return init_line_levels(chip);
}
/* Report which clock sources the DSP currently detects, translated
   from the DSP status bits to the generic ECHO_CLOCK_BIT_* mask.
   The internal clock is always available. */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	static const struct {
		u32 dsp_bit;
		u32 clock_bit;
	} map[] = {
		{ GML_CLOCK_DETECT_BIT_SPDIF, ECHO_CLOCK_BIT_SPDIF },
		{ GML_CLOCK_DETECT_BIT_ADAT, ECHO_CLOCK_BIT_ADAT },
		{ GML_CLOCK_DETECT_BIT_WORD, ECHO_CLOCK_BIT_WORD },
	};
	u32 dsp_clocks = le32_to_cpu(chip->comm_page->status_clocks);
	u32 bits = ECHO_CLOCK_BIT_INTERNAL;
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (dsp_clocks & map[i].dsp_bit)
			bits |= map[i].clock_bit;
	return bits;
}
/* Mona has an ASIC on the PCI card and another ASIC in the external
   box; both need to be loaded.  After a successful load the control
   register is programmed for 48 kHz, internal clock, S/PDIF RCA. */
static int load_asic(struct echoaudio *chip)
{
	int err;
	short asic;

	if (chip->asic_loaded)
		return 0;

	mdelay(10);
	/* The on-card ASIC depends on the DSP flavor; start with the
	   48 kHz version */
	asic = (chip->device_id == DEVICE_ID_56361) ?
		FW_MONA_361_1_ASIC48 : FW_MONA_301_1_ASIC48;
	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC, asic);
	if (err < 0)
		return err;
	chip->asic_code = asic;
	mdelay(10);

	/* Do the external one */
	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_EXTERNAL_ASIC,
				FW_MONA_2_ASIC);
	if (err < 0)
		return err;
	mdelay(10);

	err = check_asic_status(chip);

	/* Set up the control register if the load succeeded -
	   48 kHz, internal clock, S/PDIF RCA mode */
	if (!err)
		err = write_control_reg(chip,
					GML_CONVERTER_ENABLE | GML_48KHZ,
					TRUE);
	return err;
}
/* Depending on what digital mode you want, Mona needs different ASICs
   loaded.  Check whether the ASIC matching the requested clock speed
   (single or double) is already active and load it if not. */
static int switch_asic(struct echoaudio *chip, char double_speed)
{
	short wanted;
	int err;

	/* Pick the single- or double-speed ASIC for this DSP flavor */
	if (chip->device_id == DEVICE_ID_56361)
		wanted = double_speed ? FW_MONA_361_1_ASIC96 :
					FW_MONA_361_1_ASIC48;
	else
		wanted = double_speed ? FW_MONA_301_1_ASIC96 :
					FW_MONA_301_1_ASIC48;

	if (wanted == chip->asic_code)
		return 0;

	/* Load the desired ASIC */
	err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
				wanted);
	if (err < 0)
		return err;
	chip->asic_code = wanted;
	return 0;
}
/*
 * Program the Mona for a new sample rate.  Only takes effect when the
 * card is clocked internally; otherwise the rate is just recorded.
 * Rates >= 88200 Hz need the 96 kHz PCI-card ASIC (and are rejected in
 * ADAT mode), so the ASIC may be swapped here.  The caller holds
 * chip->lock; it is dropped temporarily while an ASIC loads.
 */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	u32 control_reg, clock;
	short asic;
	char force_write;

	/* Only set the clock for internal mode. */
	if (chip->input_clock != ECHO_CLOCK_INTERNAL) {
		DE_ACT(("set_sample_rate: Cannot set sample rate - "
			"clock not set to CLK_CLOCKININTERNAL\n"));
		/* Save the rate anyhow */
		chip->comm_page->sample_rate = cpu_to_le32(rate);
		chip->sample_rate = rate;
		return 0;
	}

	/* Now, check to see if the required ASIC is loaded */
	if (rate >= 88200) {
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EINVAL;
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC96;
		else
			asic = FW_MONA_301_1_ASIC96;
	} else {
		if (chip->device_id == DEVICE_ID_56361)
			asic = FW_MONA_361_1_ASIC48;
		else
			asic = FW_MONA_301_1_ASIC48;
	}

	force_write = 0;
	if (asic != chip->asic_code) {
		int err;

		/* Load the desired ASIC (load_asic_generic() can sleep) */
		spin_unlock_irq(&chip->lock);
		err = load_asic_generic(chip, DSP_FNC_LOAD_MONA_PCI_CARD_ASIC,
					asic);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		chip->asic_code = asic;
		/* Force a control-register write after an ASIC swap even
		   if the value is unchanged */
		force_write = 1;
	}

	/* Compute the new control register value */
	clock = 0;
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_CLOCK_CLEAR_MASK;
	control_reg &= GML_SPDIF_RATE_CLEAR_MASK;
	switch (rate) {
	case 96000:
		clock = GML_96KHZ;
		break;
	case 88200:
		clock = GML_88KHZ;
		break;
	case 48000:
		clock = GML_48KHZ | GML_SPDIF_SAMPLE_RATE1;
		break;
	case 44100:
		clock = GML_44KHZ;
		/* Professional mode */
		if (control_reg & GML_SPDIF_PRO_MODE)
			clock |= GML_SPDIF_SAMPLE_RATE0;
		break;
	case 32000:
		clock = GML_32KHZ | GML_SPDIF_SAMPLE_RATE0 |
			GML_SPDIF_SAMPLE_RATE1;
		break;
	case 22050:
		clock = GML_22KHZ;
		break;
	case 16000:
		clock = GML_16KHZ;
		break;
	case 11025:
		clock = GML_11KHZ;
		break;
	case 8000:
		clock = GML_8KHZ;
		break;
	default:
		DE_ACT(("set_sample_rate: %d invalid!\n", rate));
		return -EINVAL;
	}

	control_reg |= clock;

	chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP */
	chip->sample_rate = rate;
	DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));

	return write_control_reg(chip, control_reg, force_write);
}
/*
 * Select the input clock source for the Mona.  For S/PDIF and word
 * clock, the single/double-speed PCI ASIC is swapped to match the
 * clock speed detected by the DSP.  Returns -EAGAIN if the clock
 * conflicts with the current digital mode or while the device is open
 * (the ASIC must not be switched then).  The caller holds chip->lock;
 * it is dropped around switch_asic(), which can sleep.
 */
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
	u32 control_reg, clocks_from_dsp;
	int err;

	DE_ACT(("set_input_clock:\n"));

	/* Prevent two simultaneous calls to switch_asic() */
	if (atomic_read(&chip->opencount))
		return -EAGAIN;

	/* Mask off the clock select bits */
	control_reg = le32_to_cpu(chip->comm_page->control_register) &
		GML_CLOCK_CLEAR_MASK;
	clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);

	switch (clock) {
	case ECHO_CLOCK_INTERNAL:
		DE_ACT(("Set Mona clock to INTERNAL\n"));
		chip->input_clock = ECHO_CLOCK_INTERNAL;
		/* Re-programming the sample rate applies the new clock */
		return set_sample_rate(chip, chip->sample_rate);
	case ECHO_CLOCK_SPDIF:
		if (chip->digital_mode == DIGITAL_MODE_ADAT)
			return -EAGAIN;
		/* switch_asic() can sleep; drop the lock around it */
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_SPDIF96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		DE_ACT(("Set Mona clock to SPDIF\n"));
		control_reg |= GML_SPDIF_CLOCK;
		/* Track the detected single/double-speed S/PDIF clock */
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_SPDIF96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_WORD:
		DE_ACT(("Set Mona clock to WORD\n"));
		spin_unlock_irq(&chip->lock);
		err = switch_asic(chip, clocks_from_dsp &
				  GML_CLOCK_DETECT_BIT_WORD96);
		spin_lock_irq(&chip->lock);
		if (err < 0)
			return err;
		control_reg |= GML_WORD_CLOCK;
		if (clocks_from_dsp & GML_CLOCK_DETECT_BIT_WORD96)
			control_reg |= GML_DOUBLE_SPEED_MODE;
		else
			control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	case ECHO_CLOCK_ADAT:
		DE_ACT(("Set Mona clock to ADAT\n"));
		if (chip->digital_mode != DIGITAL_MODE_ADAT)
			return -EAGAIN;
		control_reg |= GML_ADAT_CLOCK;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	default:
		DE_ACT(("Input clock 0x%x not supported for Mona\n", clock));
		return -EINVAL;
	}

	chip->input_clock = clock;
	return write_control_reg(chip, control_reg, TRUE);
}
/*
 * Switch the Mona between S/PDIF (optical/RCA) and ADAT digital
 * modes.  If the current input clock is unusable in the new mode, the
 * card first falls back to the internal clock at 48 kHz.  Returns a
 * negative error code on failure; otherwise returns
 * incompatible_clock (TRUE tells the caller the clock was changed).
 */
static int dsp_set_digital_mode(struct echoaudio *chip, u8 mode)
{
	u32 control_reg;
	int err, incompatible_clock;

	/* Set clock to "internal" if it's not compatible with the new mode */
	incompatible_clock = FALSE;
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
	case DIGITAL_MODE_SPDIF_RCA:
		if (chip->input_clock == ECHO_CLOCK_ADAT)
			incompatible_clock = TRUE;
		break;
	case DIGITAL_MODE_ADAT:
		if (chip->input_clock == ECHO_CLOCK_SPDIF)
			incompatible_clock = TRUE;
		break;
	default:
		DE_ACT(("Digital mode not supported: %d\n", mode));
		return -EINVAL;
	}

	spin_lock_irq(&chip->lock);

	if (incompatible_clock) {	/* Switch to 48KHz, internal */
		chip->sample_rate = 48000;
		set_input_clock(chip, ECHO_CLOCK_INTERNAL);
	}

	/* Clear the current digital mode */
	control_reg = le32_to_cpu(chip->comm_page->control_register);
	control_reg &= GML_DIGITAL_MODE_CLEAR_MASK;

	/* Tweak the control reg */
	switch (mode) {
	case DIGITAL_MODE_SPDIF_OPTICAL:
		control_reg |= GML_SPDIF_OPTICAL_MODE;
		break;
	case DIGITAL_MODE_SPDIF_RCA:
		/* GML_SPDIF_OPTICAL_MODE bit cleared */
		break;
	case DIGITAL_MODE_ADAT:
		/* If the current ASIC is the 96KHz ASIC, switch the ASIC
		   and set to 48 KHz */
		if (chip->asic_code == FW_MONA_361_1_ASIC96 ||
		    chip->asic_code == FW_MONA_301_1_ASIC96) {
			set_sample_rate(chip, 48000);
		}
		control_reg |= GML_ADAT_MODE;
		control_reg &= ~GML_DOUBLE_SPEED_MODE;
		break;
	}

	err = write_control_reg(chip, control_reg, FALSE);
	spin_unlock_irq(&chip->lock);
	if (err < 0)
		return err;
	chip->digital_mode = mode;

	DE_ACT(("set_digital_mode to %d\n", mode));
	return incompatible_clock;
}
| gpl-2.0 |
cwyy/linux-3.4.69 | drivers/video/pmag-aa-fb.c | 13542 | 12205 | /*
* linux/drivers/video/pmag-aa-fb.c
* Copyright 2002 Karsten Merker <merker@debian.org>
*
* PMAG-AA TurboChannel framebuffer card support ... derived from
* pmag-ba-fb.c, which is Copyright (C) 1999, 2000, 2001 by
* Michael Engel <engel@unix-ag.org>, Karsten Merker <merker@debian.org>
* and Harald Koerfgen <hkoerfg@web.de>, which itself is derived from
* "HP300 Topcat framebuffer support (derived from macfb of all things)
* Phil Blundell <philb@gnu.org> 1998"
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
* 2002-09-28 Karsten Merker <merker@linuxtag.org>
* Version 0.01: First try to get a PMAG-AA running.
*
* 2003-02-24 Thiemo Seufer <seufer@csv.ica.uni-stuttgart.de>
* Version 0.02: Major code cleanup.
*
* 2003-09-21 Thiemo Seufer <seufer@csv.ica.uni-stuttgart.de>
* Hardware cursor support.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/console.h>
#include <asm/bootinfo.h>
#include <asm/dec/machtype.h>
#include <asm/dec/tc.h>
#include <video/fbcon.h>
#include <video/fbcon-cfb8.h>
#include "bt455.h"
#include "bt431.h"
/* Version information */
#define DRIVER_VERSION "0.02"
#define DRIVER_AUTHOR "Karsten Merker <merker@linuxtag.org>"
#define DRIVER_DESCRIPTION "PMAG-AA Framebuffer Driver"
/* Prototypes */
static int aafb_set_var(struct fb_var_screeninfo *var, int con,
struct fb_info *info);
/*
* Bt455 RAM DAC register base offset (rel. to TC slot base address).
*/
#define PMAG_AA_BT455_OFFSET 0x100000
/*
* Bt431 cursor generator offset (rel. to TC slot base address).
*/
#define PMAG_AA_BT431_OFFSET 0x180000
/*
* Begin of PMAG-AA framebuffer memory relative to TC slot address,
* resolution is 1280x1024x1 (8 bits deep, but only LSB is used).
*/
#define PMAG_AA_ONBOARD_FBMEM_OFFSET 0x200000
/* Software state for the Bt431 hardware cursor and its blink timer. */
struct aafb_cursor {
	struct timer_list timer;	/* periodic blink/draw timer */
	int enable;			/* cursor logically enabled */
	int on;				/* cursor currently visible */
	int vbl_cnt;			/* timer ticks until next toggle */
	int blink_rate;			/* reload value for vbl_cnt */
	u16 x, y, width, height;	/* position and glyph size, pixels */
};
#define CURSOR_TIMER_FREQ (HZ / 50)
#define CURSOR_BLINK_RATE (20)
#define CURSOR_DRAW_DELAY (2)
/* Per-board state: the generic fb_info plus pointers to the RAM DAC
   and cursor generator registers and the framebuffer geometry. */
struct aafb_info {
	struct fb_info info;
	struct display disp;
	struct aafb_cursor cursor;
	struct bt455_regs *bt455;	/* Bt455 RAM DAC registers */
	struct bt431_regs *bt431;	/* Bt431 cursor generator registers */
	unsigned long fb_start;		/* framebuffer base address */
	unsigned long fb_size;		/* framebuffer size in bytes */
	unsigned long fb_line_length;	/* bytes per scanline */
};
/*
 * Max 3 TURBOchannel slots -> max 3 PMAG-AA.
 */
static struct aafb_info my_fb_info[3];	/* one entry per TC slot */

/* The PMAG-AA has no adjustable hardware parameters, so this is empty */
static struct aafb_par {
} current_par;

static int currcon = -1;	/* currently displayed console, -1 = none */
/* Show the hardware cursor at its current position, or hide it. */
static void aafb_set_cursor(struct aafb_info *info, int on)
{
	struct aafb_cursor *c = &info->cursor;

	if (!on) {
		bt431_erase_cursor(info->bt431);
		return;
	}
	bt431_position_cursor(info->bt431, c->x, c->y);
	bt431_enable_cursor(info->bt431);
}
/* fbcon cursor callback: draw, move or erase the hardware cursor.
   Coordinates arrive in character cells and are converted to pixels. */
static void aafbcon_cursor(struct display *disp, int mode, int x, int y)
{
	struct aafb_info *info = (struct aafb_info *)disp->fb_info;
	struct aafb_cursor *c = &info->cursor;

	/* Convert character-cell coordinates to pixels */
	x *= fontwidth(disp);
	y *= fontheight(disp);

	/* Nothing to do if position and visibility are unchanged */
	if (c->x == x && c->y == y && (mode == CM_ERASE) == !c->enable)
		return;

	/* Disable blinking and hide the cursor while it is moved */
	c->enable = 0;
	if (c->on)
		aafb_set_cursor(info, 0);
	c->x = x - disp->var.xoffset;
	c->y = y - disp->var.yoffset;

	switch (mode) {
	case CM_ERASE:
		c->on = 0;
		break;
	case CM_DRAW:
	case CM_MOVE:
		if (c->on)
			aafb_set_cursor(info, c->on);
		else
			/* Let the blink timer draw it after a short delay */
			c->vbl_cnt = CURSOR_DRAW_DELAY;
		c->enable = 1;
		break;
	}
}
/* Program the Bt431 cursor glyph for the given font cell size.
   Returns 1 on success (fbcon convention) or -EINVAL for sizes the
   cursor generator cannot handle (max 64x64). */
static int aafbcon_set_font(struct display *disp, int width, int height)
{
	struct aafb_info *info = (struct aafb_info *)disp->fb_info;
	struct aafb_cursor *c = &info->cursor;
	u8 fgc = ~attr_bgcol_ec(disp, disp->conp, &info->info);

	if (width < 0 || width > 64 || height < 0 || height > 64)
		return -EINVAL;

	c->height = height;
	c->width = width;
	bt431_set_font(info->bt431, fgc, width, height);
	return 1;
}
/* Periodic timer: toggle (blink) the cursor while it is enabled, then
   re-arm the timer unconditionally. */
static void aafb_cursor_timer_handler(unsigned long data)
{
	struct aafb_info *info = (struct aafb_info *)data;
	struct aafb_cursor *c = &info->cursor;

	if (c->enable && c->vbl_cnt && --c->vbl_cnt == 0) {
		c->on ^= 1;
		aafb_set_cursor(info, c->on);
		c->vbl_cnt = c->blink_rate;
	}

	c->timer.expires = jiffies + CURSOR_TIMER_FREQ;
	add_timer(&c->timer);
}
/* Initialize the software cursor state and start the blink timer. */
static void __init aafb_cursor_init(struct aafb_info *info)
{
	struct aafb_cursor *c = &info->cursor;

	c->enable = 1;
	c->on = 1;
	c->x = c->y = 0;
	c->width = c->height = 0;
	/* First draw happens after CURSOR_DRAW_DELAY timer ticks */
	c->vbl_cnt = CURSOR_DRAW_DELAY;
	c->blink_rate = CURSOR_BLINK_RATE;

	init_timer(&c->timer);
	c->timer.data = (unsigned long)info;
	c->timer.function = aafb_cursor_timer_handler;
	mod_timer(&c->timer, jiffies + CURSOR_TIMER_FREQ);
}
/* Stop the cursor blink timer before the driver goes away. */
static void __exit aafb_cursor_exit(struct aafb_info *info)
{
	del_timer_sync(&info->cursor.timer);
}
/* 8bpp console operations: text drawing comes from the generic
   fbcon-cfb8 helpers; cursor and font handling are ours because the
   cursor lives in the Bt431 chip. */
static struct display_switch aafb_switch8 = {
	.setup = fbcon_cfb8_setup,
	.bmove = fbcon_cfb8_bmove,
	.clear = fbcon_cfb8_clear,
	.putc = fbcon_cfb8_putc,
	.putcs = fbcon_cfb8_putcs,
	.revc = fbcon_cfb8_revc,
	.cursor = aafbcon_cursor,
	.set_font = aafbcon_set_font,
	.clear_margins = fbcon_cfb8_clear_margins,
	.fontwidthmask = FONTWIDTH(4)|FONTWIDTH(8)|FONTWIDTH(12)|FONTWIDTH(16)
};
/* Copy the current (empty) hardware parameter set into *par. */
static void aafb_get_par(struct aafb_par *par)
{
	*par = current_par;
}
/* Fill in the fixed hardware parameters of the PMAG-AA. */
static int aafb_get_fix(struct fb_fix_screeninfo *fix, int con,
			struct fb_info *info)
{
	struct aafb_info *ip = (struct aafb_info *)info;

	memset(fix, 0, sizeof(*fix));
	strcpy(fix->id, "PMAG-AA");
	fix->smem_start = ip->fb_start;
	fix->smem_len = ip->fb_size;
	fix->type = FB_TYPE_PACKED_PIXELS;
	fix->visual = FB_VISUAL_MONO10;
	fix->ypanstep = 1;
	fix->ywrapstep = 1;
	fix->line_length = 1280;
	fix->accel = FB_ACCEL_NONE;
	return 0;
}
/* Bind this board to a console: set the mode, hook up the low-level
   drawing operations and copy the fixed screen parameters into the
   display structure. */
static void aafb_set_disp(struct display *disp, int con,
			  struct aafb_info *info)
{
	struct fb_fix_screeninfo fix;

	disp->fb_info = &info->info;
	aafb_set_var(&disp->var, con, &info->info);
	/* Hide the console's cursor while the display is re-wired */
	if (disp->conp && disp->conp->vc_sw && disp->conp->vc_sw->con_cursor)
		disp->conp->vc_sw->con_cursor(disp->conp, CM_ERASE);
	disp->dispsw = &aafb_switch8;
	disp->dispsw_data = 0;

	aafb_get_fix(&fix, con, &info->info);
	disp->screen_base = (u8 *) fix.smem_start;
	disp->visual = fix.visual;
	disp->type = fix.type;
	disp->type_aux = fix.type_aux;
	disp->ypanstep = fix.ypanstep;
	disp->ywrapstep = fix.ywrapstep;
	disp->line_length = fix.line_length;
	disp->next_line = 2048;		/* physical line pitch in bytes */
	disp->can_soft_blank = 1;
	disp->inverse = 0;
	disp->scrollmode = SCROLL_YREDRAW;

	aafbcon_set_font(disp, fontwidth(disp), fontheight(disp));
}
/* Return the fixed two-entry monochrome colormap (0 = black,
   1 = white).  NOTE(review): the static tables are shared between
   calls; presumably fb_copy_cmap() only reads from the source map. */
static int aafb_get_cmap(struct fb_cmap *cmap, int kspc, int con,
			 struct fb_info *info)
{
	static u16 color[2] = {0x0000, 0x000f};
	static struct fb_cmap aafb_cmap = {0, 2, color, color, color, NULL};

	fb_copy_cmap(&aafb_cmap, cmap, kspc ? 0 : 2);
	return 0;
}
/* The palette is fixed in hardware: only accept a colormap that is
   identical to the one aafb_get_cmap() reports. */
static int aafb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
			 struct fb_info *info)
{
	u16 color[2] = {0x0000, 0x000f};

	if (cmap->start != 0 || cmap->len != 2 || cmap->transp != NULL)
		return -EINVAL;
	if (memcmp(cmap->red, color, sizeof(color)) != 0 ||
	    memcmp(cmap->green, color, sizeof(color)) != 0 ||
	    memcmp(cmap->blue, color, sizeof(color)) != 0)
		return -EINVAL;
	return 0;
}
/* No device-specific ioctls are supported. */
static int aafb_ioctl(struct fb_info *info, u32 cmd, unsigned long arg)
{
	/* TODO: Not yet implemented */
	return -ENOIOCTLCMD;
}
/* fbcon console-switch callback: erase the old console's cursor and
   bind the display structures to the new console. */
static int aafb_switch(int con, struct fb_info *info)
{
	struct aafb_info *ip = (struct aafb_info *)info;
	struct display *old = (currcon < 0) ? &ip->disp : (fb_display + currcon);
	struct display *new = (con < 0) ? &ip->disp : (fb_display + con);

	if (old->conp && old->conp->vc_sw && old->conp->vc_sw->con_cursor)
		old->conp->vc_sw->con_cursor(old->conp, CM_ERASE);

	/* Set the current console. */
	currcon = con;
	aafb_set_disp(new, con, ip);
	return 0;
}
/* Fill @var with the only mode the hardware supports: 1280x1024
   visible in a 2048x1024 virtual screen, 8 bpp grayscale with only
   the LSB significant, sync on green. */
static void aafb_encode_var(struct fb_var_screeninfo *var,
			    struct aafb_par *par)
{
	var->xres = 1280;
	var->yres = 1024;
	var->xres_virtual = 2048;
	var->yres_virtual = 1024;
	var->xoffset = 0;
	var->yoffset = 0;
	var->bits_per_pixel = 8;
	var->grayscale = 1;
	var->red.offset = 0;
	var->red.length = 0;
	var->red.msb_right = 0;
	var->green.offset = 0;
	var->green.length = 1;	/* only one significant bit per pixel */
	var->green.msb_right = 0;
	var->blue.offset = 0;
	var->blue.length = 0;
	var->blue.msb_right = 0;
	var->transp.offset = 0;
	var->transp.length = 0;
	var->transp.msb_right = 0;
	var->nonstd = 0;
	/* NOTE(review): `x &= ~MASK & FLAG` ANDs two constants first,
	   so if FLAG is 0 this clears the whole field.  The intent was
	   probably `(x & ~MASK) | FLAG` -- confirm before changing. */
	var->activate &= ~FB_ACTIVATE_MASK & FB_ACTIVATE_NOW;
	var->accel_flags = 0;
	var->sync = FB_SYNC_ON_GREEN;
	var->vmode &= ~FB_VMODE_MASK & FB_VMODE_NONINTERLACED;
}
/* Return the current video mode.  For the dummy console (con < 0) a
   default mode is generated from scratch; otherwise the stored mode
   is copied. */
static int aafb_get_var(struct fb_var_screeninfo *var, int con,
			struct fb_info *info)
{
	struct aafb_par par;

	if (con >= 0) {
		*var = info->var;
		return 0;
	}

	memset(var, 0, sizeof(*var));
	aafb_get_par(&par);
	aafb_encode_var(var, &par);
	return 0;
}
/* "Set" the video mode.  The hardware supports exactly one mode, so
   the caller's @var is overwritten with the fixed timings and stored. */
static int aafb_set_var(struct fb_var_screeninfo *var, int con,
			struct fb_info *info)
{
	struct aafb_par par;

	aafb_get_par(&par);
	aafb_encode_var(var, &par);
	info->var = *var;
	return 0;
}
/* Pan/update callback: the cursor position becomes stale when the
   visible area changes, so erase it on the active console. */
static int aafb_update_var(int con, struct fb_info *info)
{
	struct aafb_info *ip = (struct aafb_info *)info;

	if (con == currcon) {
		struct display *disp = (con < 0) ? &ip->disp
						 : (fb_display + con);

		aafbcon_cursor(disp, CM_ERASE, ip->cursor.x, ip->cursor.y);
	}
	return 0;
}
/* 0 unblanks, any other blanks. */
static void aafb_blank(int blank, struct fb_info *info)
{
	struct aafb_info *ip = (struct aafb_info *)info;
	/* Blank by writing black into the single foreground palette
	   entry; unblanking restores white */
	u8 val = blank ? 0x00 : 0x0f;

	bt455_write_cmap_entry(ip->bt455, 1, val, val, val);
	aafbcon_cursor(&ip->disp, CM_ERASE, ip->cursor.x, ip->cursor.y);
}
/* Framebuffer entry points exported to the fbdev core. */
static struct fb_ops aafb_ops = {
	.owner = THIS_MODULE,
	.fb_get_fix = aafb_get_fix,
	.fb_get_var = aafb_get_var,
	.fb_set_var = aafb_set_var,
	.fb_get_cmap = aafb_get_cmap,
	.fb_set_cmap = aafb_set_cmap,
	.fb_ioctl = aafb_ioctl
};
/* Probe and register one PMAG-AA board found in TC slot @slot.
   Returns 0 on success or -EINVAL if the framebuffer could not be
   registered. */
static int __init init_one(int slot)
{
	unsigned long base_addr = CKSEG1ADDR(get_tc_base_addr(slot));
	struct aafb_info *ip = &my_fb_info[slot];

	memset(ip, 0, sizeof(struct aafb_info));

	/*
	 * Framebuffer display memory base address and friends.
	 */
	ip->bt455 = (struct bt455_regs *) (base_addr + PMAG_AA_BT455_OFFSET);
	ip->bt431 = (struct bt431_regs *) (base_addr + PMAG_AA_BT431_OFFSET);
	ip->fb_start = base_addr + PMAG_AA_ONBOARD_FBMEM_OFFSET;
	ip->fb_size = 2048 * 1024; /* fb_fix_screeninfo.smem_length
				      seems to be physical */
	ip->fb_line_length = 2048;

	/*
	 * Let there be consoles..
	 */
	strcpy(ip->info.modename, "PMAG-AA");
	ip->info.node = -1;
	ip->info.flags = FBINFO_FLAG_DEFAULT;
	ip->info.fbops = &aafb_ops;
	ip->info.disp = &ip->disp;
	ip->info.changevar = NULL;
	ip->info.switch_con = &aafb_switch;
	ip->info.updatevar = &aafb_update_var;
	ip->info.blank = &aafb_blank;
	aafb_set_disp(&ip->disp, currcon, ip);

	/*
	 * Configure the RAM DACs.
	 */
	bt455_erase_cursor(ip->bt455);

	/* Init colormap. */
	bt455_write_cmap_entry(ip->bt455, 0, 0x00, 0x00, 0x00);
	bt455_write_cmap_entry(ip->bt455, 1, 0x0f, 0x0f, 0x0f);

	/* Init hardware cursor. */
	bt431_init_cursor(ip->bt431);
	aafb_cursor_init(ip);

	/* Clear the screen. */
	memset ((void *)ip->fb_start, 0, ip->fb_size);

	if (register_framebuffer(&ip->info) < 0) {
		/* Stop the blink timer started by aafb_cursor_init();
		   otherwise it keeps firing on a dead device. */
		del_timer_sync(&ip->cursor.timer);
		return -EINVAL;
	}

	printk(KERN_INFO "fb%d: %s frame buffer in TC slot %d\n",
	       GET_FB_IDX(ip->info.node), ip->info.modename, slot);

	return 0;
}
/* Unregister the framebuffer in TC slot @slot and stop its cursor
   timer.  Returns 0 on success, -EINVAL if unregistering failed. */
static int __exit exit_one(int slot)
{
	struct aafb_info *ip = &my_fb_info[slot];

	if (unregister_framebuffer(&ip->info) < 0)
		return -EINVAL;
	/* aafb_cursor_exit() was never called anywhere: without this
	   the blink timer keeps firing after the device is gone. */
	aafb_cursor_exit(ip);
	return 0;
}
/*
 * Initialise the framebuffer: probe every TURBOchannel slot for
 * PMAG-AA boards and bring up each one found.
 */
int __init pmagaafb_init(void)
{
	int found = 0;
	int sid;

	for (;;) {
		sid = search_tc_card("PMAG-AA");
		if (sid < 0)
			break;
		found = 1;
		claim_tc_card(sid);
		init_one(sid);
	}
	return found ? 0 : -ENXIO;
}
/* Tear down and release every PMAG-AA claimed at init time. */
static void __exit pmagaafb_exit(void)
{
	int sid;

	for (;;) {
		sid = search_tc_card("PMAG-AA");
		if (sid < 0)
			break;
		exit_one(sid);
		release_tc_card(sid);
	}
}
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
#ifdef MODULE
module_init(pmagaafb_init);
module_exit(pmagaafb_exit);
#endif
| gpl-2.0 |
luk1337/android_kernel_samsung_i9082 | drivers/media/video/stk-sensor.c | 14566 | 19466 | /* stk-sensor.c: Driver for ov96xx sensor (used in some Syntek webcams)
*
* Copyright 2007-2008 Jaime Velasco Juan <jsagarribay@gmail.com>
*
* Some parts derived from ov7670.c:
* Copyright 2006 One Laptop Per Child Association, Inc. Written
* by Jonathan Corbet with substantial inspiration from Mark
* McClelland's ovcamchip code.
*
* Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
*
 * This file may be distributed under the terms of the GNU General
 * Public License.
 *
 * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Controlling the sensor via the STK1125 vendor specific control interface:
* The camera uses an OmniVision sensor and the stk1125 provides an
* SCCB(i2c)-USB bridge which let us program the sensor.
* In my case the sensor id is 0x9652, it can be read from sensor's register
* 0x0A and 0x0B as follows:
* - read register #R:
* output #R to index 0x0208
* output 0x0070 to index 0x0200
* input 1 byte from index 0x0201 (some kind of status register)
* until its value is 0x01
* input 1 byte from index 0x0209. This is the value of #R
* - write value V to register #R
* output #R to index 0x0204
* output V to index 0x0205
* output 0x0005 to index 0x0200
* input 1 byte from index 0x0201 until its value becomes 0x04
*/
/* It seems the i2c bus is controlled with these registers */
#include "stk-webcam.h"
#define STK_IIC_BASE (0x0200)
# define STK_IIC_OP (STK_IIC_BASE)
# define STK_IIC_OP_TX (0x05)
# define STK_IIC_OP_RX (0x70)
# define STK_IIC_STAT (STK_IIC_BASE+1)
# define STK_IIC_STAT_TX_OK (0x04)
# define STK_IIC_STAT_RX_OK (0x01)
/* I don't know what does this register.
* when it is 0x00 or 0x01, we cannot talk to the sensor,
* other values work */
# define STK_IIC_ENABLE (STK_IIC_BASE+2)
# define STK_IIC_ENABLE_NO (0x00)
/* This is what the driver writes in windows */
# define STK_IIC_ENABLE_YES (0x1e)
/*
* Address of the slave. Seems like the binary driver look for the
* sensor in multiple places, attempting a reset sequence.
* We only know about the ov9650
*/
# define STK_IIC_ADDR (STK_IIC_BASE+3)
# define STK_IIC_TX_INDEX (STK_IIC_BASE+4)
# define STK_IIC_TX_VALUE (STK_IIC_BASE+5)
# define STK_IIC_RX_INDEX (STK_IIC_BASE+8)
# define STK_IIC_RX_VALUE (STK_IIC_BASE+9)
#define MAX_RETRIES (50)
#define SENSOR_ADDRESS (0x60)
/* From ov7670.c (These registers aren't fully accurate) */
/* Registers */
#define REG_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */
#define REG_BLUE 0x01 /* blue gain */
#define REG_RED 0x02 /* red gain */
#define REG_VREF 0x03 /* Pieces of GAIN, VSTART, VSTOP */
#define REG_COM1 0x04 /* Control 1 */
#define COM1_CCIR656 0x40 /* CCIR656 enable */
#define COM1_QFMT 0x20 /* QVGA/QCIF format */
#define COM1_SKIP_0 0x00 /* Do not skip any row */
#define COM1_SKIP_2 0x04 /* Skip 2 rows of 4 */
#define COM1_SKIP_3 0x08 /* Skip 3 rows of 4 */
#define REG_BAVE 0x05 /* U/B Average level */
#define REG_GbAVE 0x06 /* Y/Gb Average level */
#define REG_AECHH 0x07 /* AEC MS 5 bits */
#define REG_RAVE 0x08 /* V/R Average level */
#define REG_COM2 0x09 /* Control 2 */
#define COM2_SSLEEP 0x10 /* Soft sleep mode */
#define REG_PID 0x0a /* Product ID MSB */
#define REG_VER 0x0b /* Product ID LSB */
#define REG_COM3 0x0c /* Control 3 */
#define COM3_SWAP 0x40 /* Byte swap */
#define COM3_SCALEEN 0x08 /* Enable scaling */
#define COM3_DCWEN 0x04 /* Enable downsamp/crop/window */
#define REG_COM4 0x0d /* Control 4 */
#define REG_COM5 0x0e /* All "reserved" */
#define REG_COM6 0x0f /* Control 6 */
#define REG_AECH 0x10 /* More bits of AEC value */
#define REG_CLKRC 0x11 /* Clock control */
#define CLK_PLL 0x80 /* Enable internal PLL */
#define CLK_EXT 0x40 /* Use external clock directly */
#define CLK_SCALE 0x3f /* Mask for internal clock scale */
#define REG_COM7 0x12 /* Control 7 */
#define COM7_RESET 0x80 /* Register reset */
#define COM7_FMT_MASK 0x38
#define COM7_FMT_SXGA 0x00
#define COM7_FMT_VGA 0x40
#define COM7_FMT_CIF 0x20 /* CIF format */
#define COM7_FMT_QVGA 0x10 /* QVGA format */
#define COM7_FMT_QCIF 0x08 /* QCIF format */
#define COM7_RGB 0x04 /* bits 0 and 2 - RGB format */
#define COM7_YUV 0x00 /* YUV */
#define COM7_BAYER 0x01 /* Bayer format */
#define COM7_PBAYER 0x05 /* "Processed bayer" */
#define REG_COM8 0x13 /* Control 8 */
#define COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */
#define COM8_AECSTEP 0x40 /* Unlimited AEC step size */
#define COM8_BFILT 0x20 /* Band filter enable */
#define COM8_AGC 0x04 /* Auto gain enable */
#define COM8_AWB 0x02 /* White balance enable */
#define COM8_AEC 0x01 /* Auto exposure enable */
#define REG_COM9 0x14 /* Control 9 - gain ceiling */
#define REG_COM10 0x15 /* Control 10 */
#define COM10_HSYNC 0x40 /* HSYNC instead of HREF */
#define COM10_PCLK_HB 0x20 /* Suppress PCLK on horiz blank */
#define COM10_HREF_REV 0x08 /* Reverse HREF */
#define COM10_VS_LEAD 0x04 /* VSYNC on clock leading edge */
#define COM10_VS_NEG 0x02 /* VSYNC negative */
#define COM10_HS_NEG 0x01 /* HSYNC negative */
#define REG_HSTART 0x17 /* Horiz start high bits */
#define REG_HSTOP 0x18 /* Horiz stop high bits */
#define REG_VSTART 0x19 /* Vert start high bits */
#define REG_VSTOP 0x1a /* Vert stop high bits */
#define REG_PSHFT 0x1b /* Pixel delay after HREF */
#define REG_MIDH 0x1c /* Manuf. ID high */
#define REG_MIDL 0x1d /* Manuf. ID low */
#define REG_MVFP 0x1e /* Mirror / vflip */
#define MVFP_MIRROR 0x20 /* Mirror image */
#define MVFP_FLIP 0x10 /* Vertical flip */
#define REG_AEW 0x24 /* AGC upper limit */
#define REG_AEB 0x25 /* AGC lower limit */
#define REG_VPT 0x26 /* AGC/AEC fast mode op region */
#define REG_ADVFL 0x2d /* Insert dummy lines (LSB) */
#define REG_ADVFH 0x2e /* Insert dummy lines (MSB) */
#define REG_HSYST 0x30 /* HSYNC rising edge delay */
#define REG_HSYEN 0x31 /* HSYNC falling edge delay */
#define REG_HREF 0x32 /* HREF pieces */
#define REG_TSLB 0x3a /* lots of stuff */
#define TSLB_YLAST 0x04 /* UYVY or VYUY - see com13 */
#define TSLB_BYTEORD 0x08 /* swap bytes in 16bit mode? */
#define REG_COM11 0x3b /* Control 11 */
#define COM11_NIGHT 0x80 /* Night mode enable */
#define COM11_NMFR 0x60 /* Two bit NM frame rate */
#define COM11_HZAUTO 0x10 /* Auto detect 50/60 Hz */
#define COM11_50HZ 0x08 /* Manual 50Hz select */
#define COM11_EXP 0x02
#define REG_COM12 0x3c /* Control 12 */
#define COM12_HREF 0x80 /* HREF always */
#define REG_COM13 0x3d /* Control 13 */
#define COM13_GAMMA 0x80 /* Gamma enable */
#define COM13_UVSAT 0x40 /* UV saturation auto adjustment */
#define COM13_CMATRIX 0x10 /* Enable color matrix for RGB or YUV */
#define COM13_UVSWAP 0x01 /* V before U - w/TSLB */
#define REG_COM14 0x3e /* Control 14 */
#define COM14_DCWEN 0x10 /* DCW/PCLK-scale enable */
#define REG_EDGE 0x3f /* Edge enhancement factor */
#define REG_COM15 0x40 /* Control 15 */
#define COM15_R10F0 0x00 /* Data range 10 to F0 */
#define COM15_R01FE 0x80 /* 01 to FE */
#define COM15_R00FF 0xc0 /* 00 to FF */
#define COM15_RGB565 0x10 /* RGB565 output */
#define COM15_RGBFIXME 0x20 /* FIXME */
#define COM15_RGB555 0x30 /* RGB555 output */
#define REG_COM16 0x41 /* Control 16 */
#define COM16_AWBGAIN 0x08 /* AWB gain enable */
#define REG_COM17 0x42 /* Control 17 */
#define COM17_AECWIN 0xc0 /* AEC window - must match COM4 */
#define COM17_CBAR 0x08 /* DSP Color bar */
/*
 * This matrix defines how the colors are generated, must be
 * tweaked to adjust hue and saturation.
 *
 * Order: v-red, v-green, v-blue, u-red, u-green, u-blue
 *
 * They are nine-bit signed quantities, with the sign bit
 * stored in 0x58. Sign for v-red is bit 0, and up from there.
 */
#define REG_CMATRIX_BASE 0x4f
#define CMATRIX_LEN 6
#define REG_CMATRIX_SIGN 0x58
#define REG_BRIGHT 0x55 /* Brightness */
#define REG_CONTRAS 0x56 /* Contrast control */
#define REG_GFIX 0x69 /* Fix gain control */
#define REG_RGB444 0x8c /* RGB 444 control */
#define R444_ENABLE 0x02 /* Turn on RGB444, overrides 5x5 */
#define R444_RGBX 0x01 /* Empty nibble at end */
#define REG_HAECC1 0x9f /* Hist AEC/AGC control 1 */
#define REG_HAECC2 0xa0 /* Hist AEC/AGC control 2 */
#define REG_BD50MAX 0xa5 /* 50hz banding step limit */
#define REG_HAECC3 0xa6 /* Hist AEC/AGC control 3 */
#define REG_HAECC4 0xa7 /* Hist AEC/AGC control 4 */
#define REG_HAECC5 0xa8 /* Hist AEC/AGC control 5 */
#define REG_HAECC6 0xa9 /* Hist AEC/AGC control 6 */
#define REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
#define REG_BD60MAX 0xab /* 60hz banding step limit */
/* Returns 0 if OK */
/*
 * Write one byte to a sensor register through the bridge's I2C engine:
 * load index and value, trigger the transfer, then poll the status
 * register. Returns 0 on success, 1 on any USB or I2C failure.
 */
static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
{
	int tries;
	int status = 0;

	if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
		return 1;
	if (stk_camera_write_reg(dev, STK_IIC_TX_VALUE, val))
		return 1;
	if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_TX))
		return 1;
	/* Poll until the engine reports a non-zero status or we give up. */
	for (tries = 0; tries < MAX_RETRIES; tries++) {
		if (stk_camera_read_reg(dev, STK_IIC_STAT, &status))
			return 1;
		if (status != 0)
			break;
	}
	if (status == STK_IIC_STAT_TX_OK)
		return 0;
	if (status)
		STK_ERROR("stk_sensor_outb failed, status=0x%02x\n",
			status);
	return 1;
}
/*
 * Read one byte from a sensor register through the bridge's I2C engine:
 * load the index, trigger a receive, poll for completion, then fetch
 * the received value. Returns 0 on success, 1 on failure.
 */
static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
{
	int tries;
	int status = 0;

	if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
		return 1;
	if (stk_camera_write_reg(dev, STK_IIC_OP, STK_IIC_OP_RX))
		return 1;
	/* Poll until the engine reports a non-zero status or we give up. */
	for (tries = 0; tries < MAX_RETRIES; tries++) {
		if (stk_camera_read_reg(dev, STK_IIC_STAT, &status))
			return 1;
		if (status != 0)
			break;
	}
	if (status != STK_IIC_STAT_RX_OK) {
		if (status)
			STK_ERROR("stk_sensor_inb failed, status=0x%02x\n",
				status);
		return 1;
	}
	if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &status))
		return 1;
	*val = (u8) status;
	return 0;
}
/*
 * Send a {0xff, 0xff}-terminated list of register/value pairs to the
 * sensor. A NULL list is a no-op. Stops at the first failing write and
 * propagates its return code; returns 0 when the whole list was sent.
 */
static int stk_sensor_write_regvals(struct stk_camera *dev,
		struct regval *rv)
{
	int ret;

	if (rv == NULL)
		return 0;
	while (!(rv->reg == 0xff && rv->val == 0xff)) {
		ret = stk_sensor_outb(dev, rv->reg, rv->val);
		if (ret != 0)
			return ret;
		rv++;
	}
	return 0;
}
/*
 * Put the sensor into soft sleep: read COM2 and write it back with the
 * sleep bit set. Returns 0 on success, non-zero on I2C failure.
 */
int stk_sensor_sleep(struct stk_camera *dev)
{
	u8 com2;

	if (stk_sensor_inb(dev, REG_COM2, &com2))
		return 1;
	return stk_sensor_outb(dev, REG_COM2, com2 | COM2_SSLEEP);
}
/*
 * Wake the sensor from soft sleep: read COM2 and write it back with the
 * sleep bit cleared. Returns 0 on success, non-zero on I2C failure.
 */
int stk_sensor_wakeup(struct stk_camera *dev)
{
	u8 com2;

	if (stk_sensor_inb(dev, REG_COM2, &com2))
		return 1;
	return stk_sensor_outb(dev, REG_COM2, com2 & ~COM2_SSLEEP);
}
/*
 * Baseline sensor register setup uploaded once at probe time by
 * stk_sensor_init(). List is terminated by the {0xff, 0xff} marker.
 */
static struct regval ov_initvals[] = {
{REG_CLKRC, CLK_PLL},
{REG_COM11, 0x01},
{0x6a, 0x7d},
{REG_AECH, 0x40},
{REG_GAIN, 0x00},
{REG_BLUE, 0x80},
{REG_RED, 0x80},
/* Do not enable fast AEC for now */
/*{REG_COM8, COM8_FASTAEC|COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC},*/
{REG_COM8, COM8_AECSTEP|COM8_BFILT|COM8_AGC|COM8_AEC},
{0x39, 0x50}, {0x38, 0x93},
{0x37, 0x00}, {0x35, 0x81},
{REG_COM5, 0x20},
{REG_COM1, 0x00},
{REG_COM3, 0x00},
{REG_COM4, 0x00},
{REG_PSHFT, 0x00},
{0x16, 0x07},
{0x33, 0xe2}, {0x34, 0xbf},
{REG_COM16, 0x00},
{0x96, 0x04},
/* Gamma curve values */
/* { 0x7a, 0x20 }, { 0x7b, 0x10 },
{ 0x7c, 0x1e }, { 0x7d, 0x35 },
{ 0x7e, 0x5a }, { 0x7f, 0x69 },
{ 0x80, 0x76 }, { 0x81, 0x80 },
{ 0x82, 0x88 }, { 0x83, 0x8f },
{ 0x84, 0x96 }, { 0x85, 0xa3 },
{ 0x86, 0xaf }, { 0x87, 0xc4 },
{ 0x88, 0xd7 }, { 0x89, 0xe8 },
*/
{REG_GFIX, 0x40},
{0x8e, 0x00},
{REG_COM12, 0x73},
{0x8f, 0xdf}, {0x8b, 0x06},
{0x8c, 0x20},
{0x94, 0x88}, {0x95, 0x88},
/* {REG_COM15, 0xc1}, TODO */
{0x29, 0x3f},
{REG_COM6, 0x42},
{REG_BD50MAX, 0x80},
{REG_HAECC6, 0xb8}, {REG_HAECC7, 0x92},
{REG_BD60MAX, 0x0a},
{0x90, 0x00}, {0x91, 0x00},
{REG_HAECC1, 0x00}, {REG_HAECC2, 0x00},
{REG_AEW, 0x68}, {REG_AEB, 0x5c},
{REG_VPT, 0xc3},
{REG_COM9, 0x2e},
{0x2a, 0x00}, {0x2b, 0x00},
{0xff, 0xff}, /* END MARKER */
};
/*
 * Probe the I2C bus and initialise the sensor chip.
 *
 * Enables the bridge's I2C engine, resets the sensor, verifies the
 * OmniVision manufacturer ID (0x7FA2), reads the model ID and uploads
 * the initial register table. Returns 0 on success, -ENODEV on any
 * communication or identification failure.
 */
int stk_sensor_init(struct stk_camera *dev)
{
	u8 idl = 0;
	u8 idh = 0;

	if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES)
		|| stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS)
		|| stk_sensor_outb(dev, REG_COM7, COM7_RESET)) {
		STK_ERROR("Sensor resetting failed\n");
		return -ENODEV;
	}
	msleep(10);
	/* Read the manufacturer ID: ov = 0x7FA2 */
	if (stk_sensor_inb(dev, REG_MIDH, &idh)
	    || stk_sensor_inb(dev, REG_MIDL, &idl)) {
		STK_ERROR("Strange error reading sensor ID\n");
		return -ENODEV;
	}
	if (idh != 0x7f || idl != 0xa2) {
		STK_ERROR("Huh? you don't have a sensor from ovt\n");
		return -ENODEV;
	}
	if (stk_sensor_inb(dev, REG_PID, &idh)
	    || stk_sensor_inb(dev, REG_VER, &idl)) {
		STK_ERROR("Could not read sensor model\n");
		return -ENODEV;
	}
	/*
	 * Fix: the return value of the init-table upload used to be
	 * ignored, which could leave the sensor half-programmed after an
	 * I2C error. Fail the probe instead.
	 */
	if (stk_sensor_write_regvals(dev, ov_initvals)) {
		STK_ERROR("Sensor initialisation failed\n");
		return -ENODEV;
	}
	msleep(10);
	STK_INFO("OmniVision sensor detected, id %02X%02X"
		" at address %x\n", idh, idl, SENSOR_ADDRESS);
	return 0;
}
/* V4L2_PIX_FMT_UYVY: YUV 4:2:2 with U first (TSLB_YLAST swaps Y order) */
static struct regval ov_fmt_uyvy[] = {
{REG_TSLB, TSLB_YLAST|0x08 },
{ 0x4f, 0x80 }, /* "matrix coefficient 1" */
{ 0x50, 0x80 }, /* "matrix coefficient 2" */
{ 0x51, 0 }, /* vb */
{ 0x52, 0x22 }, /* "matrix coefficient 4" */
{ 0x53, 0x5e }, /* "matrix coefficient 5" */
{ 0x54, 0x80 }, /* "matrix coefficient 6" */
{REG_COM13, COM13_UVSAT|COM13_CMATRIX},
{REG_COM15, COM15_R00FF },
{0xff, 0xff}, /* END MARKER */
};
/* V4L2_PIX_FMT_YUYV: same matrix as UYVY but default TSLB byte order */
static struct regval ov_fmt_yuyv[] = {
{REG_TSLB, 0 },
{ 0x4f, 0x80 }, /* "matrix coefficient 1" */
{ 0x50, 0x80 }, /* "matrix coefficient 2" */
{ 0x51, 0 }, /* vb */
{ 0x52, 0x22 }, /* "matrix coefficient 4" */
{ 0x53, 0x5e }, /* "matrix coefficient 5" */
{ 0x54, 0x80 }, /* "matrix coefficient 6" */
{REG_COM13, COM13_UVSAT|COM13_CMATRIX},
{REG_COM15, COM15_R00FF },
{0xff, 0xff}, /* END MARKER */
};
/* V4L2_PIX_FMT_RGB565X rrrrrggg gggbbbbb (big-endian RGB565) */
static struct regval ov_fmt_rgbr[] = {
{ REG_RGB444, 0 }, /* No RGB444 please */
{REG_TSLB, 0x00},
{ REG_COM1, 0x0 },
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
{ 0x50, 0xb3 }, /* "matrix coefficient 2" */
{ 0x51, 0 }, /* vb */
{ 0x52, 0x3d }, /* "matrix coefficient 4" */
{ 0x53, 0xa7 }, /* "matrix coefficient 5" */
{ 0x54, 0xe4 }, /* "matrix coefficient 6" */
{ REG_COM13, COM13_GAMMA },
{ REG_COM15, COM15_RGB565|COM15_R00FF },
{ 0xff, 0xff },
};
/* V4L2_PIX_FMT_RGB565 gggbbbbb rrrrrggg (little-endian; only the TSLB
 * byte-swap bit differs from the RGB565X table above) */
static struct regval ov_fmt_rgbp[] = {
{ REG_RGB444, 0 }, /* No RGB444 please */
{REG_TSLB, TSLB_BYTEORD },
{ REG_COM1, 0x0 },
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
{ 0x50, 0xb3 }, /* "matrix coefficient 2" */
{ 0x51, 0 }, /* vb */
{ 0x52, 0x3d }, /* "matrix coefficient 4" */
{ 0x53, 0xa7 }, /* "matrix coefficient 5" */
{ 0x54, 0xe4 }, /* "matrix coefficient 6" */
{ REG_COM13, COM13_GAMMA },
{ REG_COM15, COM15_RGB565|COM15_R00FF },
{ 0xff, 0xff },
};
/* V4L2_PIX_FMT_SBGGR8 — raw Bayer; this is the table selected for
 * V4L2_PIX_FMT_SBGGR8 in stk_sensor_configure() (the old comment said
 * SRGGB8, which did not match the code) */
static struct regval ov_fmt_bayer[] = {
/* This changes color order */
{REG_TSLB, 0x40}, /* BGGR */
/* {REG_TSLB, 0x08}, */ /* BGGR with vertical image flipping */
{REG_COM15, COM15_R00FF },
{0xff, 0xff}, /* END MARKER */
};
/*
 * Store a set of start/stop values into the camera.
 *
 * Programs the sensor's capture window. Returns 0 when every transfer
 * succeeded; each helper returns 0/1, so a non-zero sum means at least
 * one register access failed.
 */
static int stk_sensor_set_hw(struct stk_camera *dev,
int hstart, int hstop, int vstart, int vstop)
{
int ret;
unsigned char v;
/*
 * Horizontal: 11 bits, top 8 live in hstart and hstop. Bottom 3 of
 * hstart are in href[2:0], bottom 3 of hstop in href[5:3]. There is
 * a mystery "edge offset" value in the top two bits of href.
 */
ret = stk_sensor_outb(dev, REG_HSTART, (hstart >> 3) & 0xff);
ret += stk_sensor_outb(dev, REG_HSTOP, (hstop >> 3) & 0xff);
ret += stk_sensor_inb(dev, REG_HREF, &v);
/* Preserve the top two (unknown) bits, repack the low bits. */
v = (v & 0xc0) | ((hstop & 0x7) << 3) | (hstart & 0x7);
msleep(10);
ret += stk_sensor_outb(dev, REG_HREF, v);
/*
 * Vertical: similar arrangement (note: this is different from ov7670.c)
 */
ret += stk_sensor_outb(dev, REG_VSTART, (vstart >> 3) & 0xff);
ret += stk_sensor_outb(dev, REG_VSTOP, (vstop >> 3) & 0xff);
ret += stk_sensor_inb(dev, REG_VREF, &v);
v = (v & 0xc0) | ((vstop & 0x7) << 3) | (vstart & 0x7);
msleep(10);
ret += stk_sensor_outb(dev, REG_VREF, v);
return ret;
}
/*
 * Program the sensor for the currently selected resolution
 * (dev->vsettings.mode) and pixel format (dev->vsettings.palette),
 * then apply mirror/flip and the capture window.
 * Returns 0 on success, -EFAULT for an unsupported mode or palette.
 */
int stk_sensor_configure(struct stk_camera *dev)
{
int com7;
/*
 * We setup the sensor to output dummy lines in low-res modes,
 * so we don't get absurdly high framerates.
 */
unsigned dummylines;
int flip;
struct regval *rv;
/* Select COM7 format bits and the dummy-line count for this mode. */
switch (dev->vsettings.mode) {
case MODE_QCIF: com7 = COM7_FMT_QCIF;
dummylines = 604;
break;
case MODE_QVGA: com7 = COM7_FMT_QVGA;
dummylines = 267;
break;
case MODE_CIF: com7 = COM7_FMT_CIF;
dummylines = 412;
break;
case MODE_VGA: com7 = COM7_FMT_VGA;
dummylines = 11;
break;
case MODE_SXGA: com7 = COM7_FMT_SXGA;
dummylines = 0;
break;
/* NOTE(review): -EINVAL would be the conventional errno here and
 * below - confirm callers before changing. */
default: STK_ERROR("Unsupported mode %d\n", dev->vsettings.mode);
return -EFAULT;
}
/* Select output encoding bits and the matching register table. */
switch (dev->vsettings.palette) {
case V4L2_PIX_FMT_UYVY:
com7 |= COM7_YUV;
rv = ov_fmt_uyvy;
break;
case V4L2_PIX_FMT_YUYV:
com7 |= COM7_YUV;
rv = ov_fmt_yuyv;
break;
case V4L2_PIX_FMT_RGB565:
com7 |= COM7_RGB;
rv = ov_fmt_rgbp;
break;
case V4L2_PIX_FMT_RGB565X:
com7 |= COM7_RGB;
rv = ov_fmt_rgbr;
break;
case V4L2_PIX_FMT_SBGGR8:
com7 |= COM7_PBAYER;
rv = ov_fmt_bayer;
break;
default: STK_ERROR("Unsupported colorspace\n");
return -EFAULT;
}
/*FIXME sometimes the sensor go to a bad state
stk_sensor_write_regvals(dev, ov_initvals); */
stk_sensor_outb(dev, REG_COM7, com7);
msleep(50);
stk_sensor_write_regvals(dev, rv);
flip = (dev->vsettings.vflip?MVFP_FLIP:0)
| (dev->vsettings.hflip?MVFP_MIRROR:0);
stk_sensor_outb(dev, REG_MVFP, flip);
/* Bayer order depends on vertical flip; adjust TSLB when not flipped. */
if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8
&& !dev->vsettings.vflip)
stk_sensor_outb(dev, REG_TSLB, 0x08);
stk_sensor_outb(dev, REG_ADVFH, dummylines >> 8);
stk_sensor_outb(dev, REG_ADVFL, dummylines & 0xff);
msleep(50);
switch (dev->vsettings.mode) {
case MODE_VGA:
if (stk_sensor_set_hw(dev, 302, 1582, 6, 486))
STK_ERROR("stk_sensor_set_hw failed (VGA)\n");
break;
case MODE_SXGA:
case MODE_CIF:
case MODE_QVGA:
case MODE_QCIF:
/*FIXME These settings seem ignored by the sensor
if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034))
STK_ERROR("stk_sensor_set_hw failed (SXGA)\n");
*/
break;
}
msleep(10);
return 0;
}
/*
 * Set the exposure target: centre the AEC window at 'br' (0..255),
 * +/- 6 with clamping at both ends. Returns 0, or -EINVAL when br is
 * out of range.
 */
int stk_sensor_set_brightness(struct stk_camera *dev, int br)
{
	int lower, upper;

	if (br < 0 || br > 0xff)
		return -EINVAL;
	lower = br - 6;
	if (lower < 0)
		lower = 0;
	upper = br + 6;
	if (upper > 0xff)
		upper = 0xff;
	stk_sensor_outb(dev, REG_AEB, lower);
	stk_sensor_outb(dev, REG_AEW, upper);
	return 0;
}
| gpl-2.0 |
drhonk/Bali_t959 | drivers/media/dvb/frontends/stv090x.c | 487 | 119377 | /*
STV0900/0903 Multistandard Broadcast Frontend driver
Copyright (C) Manu Abraham <abraham.manu@gmail.com>
Copyright (C) ST Microelectronics
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
#include "stv6110x.h" /* for demodulator internal modes */
#include "stv090x_reg.h"
#include "stv090x.h"
#include "stv090x_priv.h"
static unsigned int verbose;
module_param(verbose, int, 0644);
struct mutex demod_lock;
/* DVBS1 and DSS C/N Lookup table:
 * { C/N in 0.1 dB units, raw noise-estimator register reading } */
static const struct stv090x_tab stv090x_s1cn_tab[] = {
{ 0, 8917 }, /* 0.0dB */
{ 5, 8801 }, /* 0.5dB */
{ 10, 8667 }, /* 1.0dB */
{ 15, 8522 }, /* 1.5dB */
{ 20, 8355 }, /* 2.0dB */
{ 25, 8175 }, /* 2.5dB */
{ 30, 7979 }, /* 3.0dB */
{ 35, 7763 }, /* 3.5dB */
{ 40, 7530 }, /* 4.0dB */
{ 45, 7282 }, /* 4.5dB */
{ 50, 7026 }, /* 5.0dB */
{ 55, 6781 }, /* 5.5dB */
{ 60, 6514 }, /* 6.0dB */
{ 65, 6241 }, /* 6.5dB */
{ 70, 5965 }, /* 7.0dB */
{ 75, 5690 }, /* 7.5dB */
{ 80, 5424 }, /* 8.0dB */
{ 85, 5161 }, /* 8.5dB */
{ 90, 4902 }, /* 9.0dB */
{ 95, 4654 }, /* 9.5dB */
{ 100, 4417 }, /* 10.0dB */
{ 105, 4186 }, /* 10.5dB */
{ 110, 3968 }, /* 11.0dB */
{ 115, 3757 }, /* 11.5dB */
{ 120, 3558 }, /* 12.0dB */
{ 125, 3366 }, /* 12.5dB */
{ 130, 3185 }, /* 13.0dB */
{ 135, 3012 }, /* 13.5dB */
{ 140, 2850 }, /* 14.0dB */
{ 145, 2698 }, /* 14.5dB */
{ 150, 2550 }, /* 15.0dB */
{ 160, 2283 }, /* 16.0dB */
{ 170, 2042 }, /* 17.0dB */
{ 180, 1827 }, /* 18.0dB */
{ 190, 1636 }, /* 19.0dB */
{ 200, 1466 }, /* 20.0dB */
{ 210, 1315 }, /* 21.0dB */
{ 220, 1181 }, /* 22.0dB */
{ 230, 1064 }, /* 23.0dB */
{ 240, 960 }, /* 24.0dB */
{ 250, 869 }, /* 25.0dB */
{ 260, 792 }, /* 26.0dB */
{ 270, 724 }, /* 27.0dB */
{ 280, 665 }, /* 28.0dB */
{ 290, 616 }, /* 29.0dB */
{ 300, 573 }, /* 30.0dB */
{ 310, 537 }, /* 31.0dB */
{ 320, 507 }, /* 32.0dB */
{ 330, 483 }, /* 33.0dB */
{ 400, 398 }, /* 40.0dB */
{ 450, 381 }, /* 45.0dB */
{ 500, 377 } /* 50.0dB */
};
/* DVBS2 C/N Lookup table:
 * { C/N in 0.1 dB units, raw noise-estimator register reading } */
static const struct stv090x_tab stv090x_s2cn_tab[] = {
{ -30, 13348 }, /* -3.0dB */
{ -20, 12640 }, /* -2.0dB */
{ -10, 11883 }, /* -1.0dB */
{ 0, 11101 }, /* -0.0dB */
{ 5, 10718 }, /* 0.5dB */
{ 10, 10339 }, /* 1.0dB */
{ 15, 9947 }, /* 1.5dB */
{ 20, 9552 }, /* 2.0dB */
{ 25, 9183 }, /* 2.5dB */
{ 30, 8799 }, /* 3.0dB */
{ 35, 8422 }, /* 3.5dB */
{ 40, 8062 }, /* 4.0dB */
{ 45, 7707 }, /* 4.5dB */
{ 50, 7353 }, /* 5.0dB */
{ 55, 7025 }, /* 5.5dB */
{ 60, 6684 }, /* 6.0dB */
{ 65, 6331 }, /* 6.5dB */
{ 70, 6036 }, /* 7.0dB */
{ 75, 5727 }, /* 7.5dB */
{ 80, 5437 }, /* 8.0dB */
{ 85, 5164 }, /* 8.5dB */
{ 90, 4902 }, /* 9.0dB */
{ 95, 4653 }, /* 9.5dB */
{ 100, 4408 }, /* 10.0dB */
{ 105, 4187 }, /* 10.5dB */
{ 110, 3961 }, /* 11.0dB */
{ 115, 3751 }, /* 11.5dB */
{ 120, 3558 }, /* 12.0dB */
{ 125, 3368 }, /* 12.5dB */
{ 130, 3191 }, /* 13.0dB */
{ 135, 3017 }, /* 13.5dB */
{ 140, 2862 }, /* 14.0dB */
{ 145, 2710 }, /* 14.5dB */
{ 150, 2565 }, /* 15.0dB */
{ 160, 2300 }, /* 16.0dB */
{ 170, 2058 }, /* 17.0dB */
{ 180, 1849 }, /* 18.0dB */
{ 190, 1663 }, /* 19.0dB */
{ 200, 1495 }, /* 20.0dB */
{ 210, 1349 }, /* 21.0dB */
{ 220, 1222 }, /* 22.0dB */
{ 230, 1110 }, /* 23.0dB */
{ 240, 1011 }, /* 24.0dB */
{ 250, 925 }, /* 25.0dB */
{ 260, 853 }, /* 26.0dB */
{ 270, 789 }, /* 27.0dB */
{ 280, 734 }, /* 28.0dB */
{ 290, 690 }, /* 29.0dB */
{ 300, 650 }, /* 30.0dB */
{ 310, 619 }, /* 31.0dB */
{ 320, 593 }, /* 32.0dB */
{ 330, 571 }, /* 33.0dB */
{ 400, 498 }, /* 40.0dB */
{ 450, 484 }, /* 45.0dB */
{ 500, 481 } /* 50.0dB */
};
/* RF level lookup table: { level in dBm, raw AGC register reading } */
static const struct stv090x_tab stv090x_rf_tab[] = {
{ -5, 0xcaa1 }, /* -5dBm */
{ -10, 0xc229 }, /* -10dBm */
{ -15, 0xbb08 }, /* -15dBm */
{ -20, 0xb4bc }, /* -20dBm */
{ -25, 0xad5a }, /* -25dBm */
{ -30, 0xa298 }, /* -30dBm */
{ -35, 0x98a8 }, /* -35dBm */
{ -40, 0x8389 }, /* -40dBm */
{ -45, 0x59be }, /* -45dBm */
{ -50, 0x3a14 }, /* -50dBm */
{ -55, 0x2d11 }, /* -55dBm */
{ -60, 0x210d }, /* -60dBm */
/* NOTE(review): 0xa14f breaks the otherwise monotonically decreasing
 * sequence (0x210d above, 0x07aa below) - looks like a typo inherited
 * from the vendor table; confirm against the ST reference driver. */
{ -65, 0xa14f }, /* -65dBm */
{ -70, 0x07aa } /* -70dBm */
};
/* Power-up register defaults for the STV0900 (both demodulator paths,
 * P1 and P2, are programmed). Applied once during device init. */
static struct stv090x_reg stv0900_initval[] = {
{ STV090x_OUTCFG, 0x00 },
{ STV090x_MODECFG, 0xff },
{ STV090x_AGCRF1CFG, 0x11 },
{ STV090x_AGCRF2CFG, 0x13 },
{ STV090x_TSGENERAL1X, 0x14 },
{ STV090x_TSTTNR2, 0x21 },
{ STV090x_TSTTNR4, 0x21 },
{ STV090x_P2_DISTXCTL, 0x22 },
{ STV090x_P2_F22TX, 0xc0 },
{ STV090x_P2_F22RX, 0xc0 },
{ STV090x_P2_DISRXCTL, 0x00 },
{ STV090x_P2_DMDCFGMD, 0xF9 },
{ STV090x_P2_DEMOD, 0x08 },
{ STV090x_P2_DMDCFG3, 0xc4 },
{ STV090x_P2_CARFREQ, 0xed },
{ STV090x_P2_LDT, 0xd0 },
{ STV090x_P2_LDT2, 0xb8 },
{ STV090x_P2_TMGCFG, 0xd2 },
{ STV090x_P2_TMGTHRISE, 0x20 },
{ STV090x_P1_TMGCFG, 0xd2 },
{ STV090x_P2_TMGTHFALL, 0x00 },
{ STV090x_P2_FECSPY, 0x88 },
{ STV090x_P2_FSPYDATA, 0x3a },
{ STV090x_P2_FBERCPT4, 0x00 },
{ STV090x_P2_FSPYBER, 0x10 },
{ STV090x_P2_ERRCTRL1, 0x35 },
{ STV090x_P2_ERRCTRL2, 0xc1 },
{ STV090x_P2_CFRICFG, 0xf8 },
{ STV090x_P2_NOSCFG, 0x1c },
{ STV090x_P2_DMDTOM, 0x20 },
{ STV090x_P2_CORRELMANT, 0x70 },
{ STV090x_P2_CORRELABS, 0x88 },
{ STV090x_P2_AGC2O, 0x5b },
{ STV090x_P2_AGC2REF, 0x38 },
{ STV090x_P2_CARCFG, 0xe4 },
{ STV090x_P2_ACLC, 0x1A },
{ STV090x_P2_BCLC, 0x09 },
{ STV090x_P2_CARHDR, 0x08 },
{ STV090x_P2_KREFTMG, 0xc1 },
{ STV090x_P2_SFRUPRATIO, 0xf0 },
{ STV090x_P2_SFRLOWRATIO, 0x70 },
{ STV090x_P2_SFRSTEP, 0x58 },
{ STV090x_P2_TMGCFG2, 0x01 },
{ STV090x_P2_CAR2CFG, 0x26 },
{ STV090x_P2_BCLC2S2Q, 0x86 },
{ STV090x_P2_BCLC2S28, 0x86 },
{ STV090x_P2_SMAPCOEF7, 0x77 },
{ STV090x_P2_SMAPCOEF6, 0x85 },
{ STV090x_P2_SMAPCOEF5, 0x77 },
{ STV090x_P2_TSCFGL, 0x20 },
{ STV090x_P2_DMDCFG2, 0x3b },
{ STV090x_P2_MODCODLST0, 0xff },
{ STV090x_P2_MODCODLST1, 0xff },
{ STV090x_P2_MODCODLST2, 0xff },
{ STV090x_P2_MODCODLST3, 0xff },
{ STV090x_P2_MODCODLST4, 0xff },
{ STV090x_P2_MODCODLST5, 0xff },
{ STV090x_P2_MODCODLST6, 0xff },
{ STV090x_P2_MODCODLST7, 0xcc },
{ STV090x_P2_MODCODLST8, 0xcc },
{ STV090x_P2_MODCODLST9, 0xcc },
{ STV090x_P2_MODCODLSTA, 0xcc },
{ STV090x_P2_MODCODLSTB, 0xcc },
{ STV090x_P2_MODCODLSTC, 0xcc },
{ STV090x_P2_MODCODLSTD, 0xcc },
{ STV090x_P2_MODCODLSTE, 0xcc },
{ STV090x_P2_MODCODLSTF, 0xcf },
{ STV090x_P1_DISTXCTL, 0x22 },
{ STV090x_P1_F22TX, 0xc0 },
{ STV090x_P1_F22RX, 0xc0 },
{ STV090x_P1_DISRXCTL, 0x00 },
{ STV090x_P1_DMDCFGMD, 0xf9 },
{ STV090x_P1_DEMOD, 0x08 },
{ STV090x_P1_DMDCFG3, 0xc4 },
{ STV090x_P1_DMDTOM, 0x20 },
{ STV090x_P1_CARFREQ, 0xed },
{ STV090x_P1_LDT, 0xd0 },
{ STV090x_P1_LDT2, 0xb8 },
{ STV090x_P1_TMGCFG, 0xd2 },
{ STV090x_P1_TMGTHRISE, 0x20 },
{ STV090x_P1_TMGTHFALL, 0x00 },
{ STV090x_P1_SFRUPRATIO, 0xf0 },
{ STV090x_P1_SFRLOWRATIO, 0x70 },
{ STV090x_P1_TSCFGL, 0x20 },
{ STV090x_P1_FECSPY, 0x88 },
{ STV090x_P1_FSPYDATA, 0x3a },
{ STV090x_P1_FBERCPT4, 0x00 },
{ STV090x_P1_FSPYBER, 0x10 },
{ STV090x_P1_ERRCTRL1, 0x35 },
{ STV090x_P1_ERRCTRL2, 0xc1 },
{ STV090x_P1_CFRICFG, 0xf8 },
{ STV090x_P1_NOSCFG, 0x1c },
{ STV090x_P1_CORRELMANT, 0x70 },
{ STV090x_P1_CORRELABS, 0x88 },
{ STV090x_P1_AGC2O, 0x5b },
{ STV090x_P1_AGC2REF, 0x38 },
{ STV090x_P1_CARCFG, 0xe4 },
{ STV090x_P1_ACLC, 0x1A },
{ STV090x_P1_BCLC, 0x09 },
{ STV090x_P1_CARHDR, 0x08 },
{ STV090x_P1_KREFTMG, 0xc1 },
{ STV090x_P1_SFRSTEP, 0x58 },
{ STV090x_P1_TMGCFG2, 0x01 },
{ STV090x_P1_CAR2CFG, 0x26 },
{ STV090x_P1_BCLC2S2Q, 0x86 },
{ STV090x_P1_BCLC2S28, 0x86 },
{ STV090x_P1_SMAPCOEF7, 0x77 },
{ STV090x_P1_SMAPCOEF6, 0x85 },
{ STV090x_P1_SMAPCOEF5, 0x77 },
{ STV090x_P1_DMDCFG2, 0x3b },
{ STV090x_P1_MODCODLST0, 0xff },
{ STV090x_P1_MODCODLST1, 0xff },
{ STV090x_P1_MODCODLST2, 0xff },
{ STV090x_P1_MODCODLST3, 0xff },
{ STV090x_P1_MODCODLST4, 0xff },
{ STV090x_P1_MODCODLST5, 0xff },
{ STV090x_P1_MODCODLST6, 0xff },
{ STV090x_P1_MODCODLST7, 0xcc },
{ STV090x_P1_MODCODLST8, 0xcc },
{ STV090x_P1_MODCODLST9, 0xcc },
{ STV090x_P1_MODCODLSTA, 0xcc },
{ STV090x_P1_MODCODLSTB, 0xcc },
{ STV090x_P1_MODCODLSTC, 0xcc },
{ STV090x_P1_MODCODLSTD, 0xcc },
{ STV090x_P1_MODCODLSTE, 0xcc },
{ STV090x_P1_MODCODLSTF, 0xcf },
{ STV090x_GENCFG, 0x1d },
{ STV090x_NBITER_NF4, 0x37 },
{ STV090x_NBITER_NF5, 0x29 },
{ STV090x_NBITER_NF6, 0x37 },
{ STV090x_NBITER_NF7, 0x33 },
{ STV090x_NBITER_NF8, 0x31 },
{ STV090x_NBITER_NF9, 0x2f },
{ STV090x_NBITER_NF10, 0x39 },
{ STV090x_NBITER_NF11, 0x3a },
{ STV090x_NBITER_NF12, 0x29 },
{ STV090x_NBITER_NF13, 0x37 },
{ STV090x_NBITER_NF14, 0x33 },
{ STV090x_NBITER_NF15, 0x2f },
{ STV090x_NBITER_NF16, 0x39 },
{ STV090x_NBITER_NF17, 0x3a },
{ STV090x_NBITERNOERR, 0x04 },
{ STV090x_GAINLLR_NF4, 0x0C },
{ STV090x_GAINLLR_NF5, 0x0F },
{ STV090x_GAINLLR_NF6, 0x11 },
{ STV090x_GAINLLR_NF7, 0x14 },
{ STV090x_GAINLLR_NF8, 0x17 },
{ STV090x_GAINLLR_NF9, 0x19 },
{ STV090x_GAINLLR_NF10, 0x20 },
{ STV090x_GAINLLR_NF11, 0x21 },
{ STV090x_GAINLLR_NF12, 0x0D },
{ STV090x_GAINLLR_NF13, 0x0F },
{ STV090x_GAINLLR_NF14, 0x13 },
{ STV090x_GAINLLR_NF15, 0x1A },
{ STV090x_GAINLLR_NF16, 0x1F },
{ STV090x_GAINLLR_NF17, 0x21 },
{ STV090x_RCCFGH, 0x20 },
{ STV090x_P1_FECM, 0x01 }, /* disable DSS modes */
{ STV090x_P2_FECM, 0x01 }, /* disable DSS modes */
{ STV090x_P1_PRVIT, 0x2F }, /* disable PR 6/7 */
{ STV090x_P2_PRVIT, 0x2F }, /* disable PR 6/7 */
};
/* Power-up register defaults for the single-path STV0903 (P1 only).
 * Applied once during device init. */
static struct stv090x_reg stv0903_initval[] = {
{ STV090x_OUTCFG, 0x00 },
{ STV090x_AGCRF1CFG, 0x11 },
{ STV090x_STOPCLK1, 0x48 },
{ STV090x_STOPCLK2, 0x14 },
{ STV090x_TSTTNR1, 0x27 },
{ STV090x_TSTTNR2, 0x21 },
{ STV090x_P1_DISTXCTL, 0x22 },
{ STV090x_P1_F22TX, 0xc0 },
{ STV090x_P1_F22RX, 0xc0 },
{ STV090x_P1_DISRXCTL, 0x00 },
{ STV090x_P1_DMDCFGMD, 0xF9 },
{ STV090x_P1_DEMOD, 0x08 },
{ STV090x_P1_DMDCFG3, 0xc4 },
{ STV090x_P1_CARFREQ, 0xed },
{ STV090x_P1_TNRCFG2, 0x82 },
{ STV090x_P1_LDT, 0xd0 },
{ STV090x_P1_LDT2, 0xb8 },
{ STV090x_P1_TMGCFG, 0xd2 },
{ STV090x_P1_TMGTHRISE, 0x20 },
{ STV090x_P1_TMGTHFALL, 0x00 },
{ STV090x_P1_SFRUPRATIO, 0xf0 },
{ STV090x_P1_SFRLOWRATIO, 0x70 },
{ STV090x_P1_TSCFGL, 0x20 },
{ STV090x_P1_FECSPY, 0x88 },
{ STV090x_P1_FSPYDATA, 0x3a },
{ STV090x_P1_FBERCPT4, 0x00 },
{ STV090x_P1_FSPYBER, 0x10 },
{ STV090x_P1_ERRCTRL1, 0x35 },
{ STV090x_P1_ERRCTRL2, 0xc1 },
{ STV090x_P1_CFRICFG, 0xf8 },
{ STV090x_P1_NOSCFG, 0x1c },
{ STV090x_P1_DMDTOM, 0x20 },
{ STV090x_P1_CORRELMANT, 0x70 },
{ STV090x_P1_CORRELABS, 0x88 },
{ STV090x_P1_AGC2O, 0x5b },
{ STV090x_P1_AGC2REF, 0x38 },
{ STV090x_P1_CARCFG, 0xe4 },
{ STV090x_P1_ACLC, 0x1A },
{ STV090x_P1_BCLC, 0x09 },
{ STV090x_P1_CARHDR, 0x08 },
{ STV090x_P1_KREFTMG, 0xc1 },
{ STV090x_P1_SFRSTEP, 0x58 },
{ STV090x_P1_TMGCFG2, 0x01 },
{ STV090x_P1_CAR2CFG, 0x26 },
{ STV090x_P1_BCLC2S2Q, 0x86 },
{ STV090x_P1_BCLC2S28, 0x86 },
{ STV090x_P1_SMAPCOEF7, 0x77 },
{ STV090x_P1_SMAPCOEF6, 0x85 },
{ STV090x_P1_SMAPCOEF5, 0x77 },
{ STV090x_P1_DMDCFG2, 0x3b },
{ STV090x_P1_MODCODLST0, 0xff },
{ STV090x_P1_MODCODLST1, 0xff },
{ STV090x_P1_MODCODLST2, 0xff },
{ STV090x_P1_MODCODLST3, 0xff },
{ STV090x_P1_MODCODLST4, 0xff },
{ STV090x_P1_MODCODLST5, 0xff },
{ STV090x_P1_MODCODLST6, 0xff },
{ STV090x_P1_MODCODLST7, 0xcc },
{ STV090x_P1_MODCODLST8, 0xcc },
{ STV090x_P1_MODCODLST9, 0xcc },
{ STV090x_P1_MODCODLSTA, 0xcc },
{ STV090x_P1_MODCODLSTB, 0xcc },
{ STV090x_P1_MODCODLSTC, 0xcc },
{ STV090x_P1_MODCODLSTD, 0xcc },
{ STV090x_P1_MODCODLSTE, 0xcc },
{ STV090x_P1_MODCODLSTF, 0xcf },
{ STV090x_GENCFG, 0x1c },
{ STV090x_NBITER_NF4, 0x37 },
{ STV090x_NBITER_NF5, 0x29 },
{ STV090x_NBITER_NF6, 0x37 },
{ STV090x_NBITER_NF7, 0x33 },
{ STV090x_NBITER_NF8, 0x31 },
{ STV090x_NBITER_NF9, 0x2f },
{ STV090x_NBITER_NF10, 0x39 },
{ STV090x_NBITER_NF11, 0x3a },
{ STV090x_NBITER_NF12, 0x29 },
{ STV090x_NBITER_NF13, 0x37 },
{ STV090x_NBITER_NF14, 0x33 },
{ STV090x_NBITER_NF15, 0x2f },
{ STV090x_NBITER_NF16, 0x39 },
{ STV090x_NBITER_NF17, 0x3a },
{ STV090x_NBITERNOERR, 0x04 },
{ STV090x_GAINLLR_NF4, 0x0C },
{ STV090x_GAINLLR_NF5, 0x0F },
{ STV090x_GAINLLR_NF6, 0x11 },
{ STV090x_GAINLLR_NF7, 0x14 },
{ STV090x_GAINLLR_NF8, 0x17 },
{ STV090x_GAINLLR_NF9, 0x19 },
{ STV090x_GAINLLR_NF10, 0x20 },
{ STV090x_GAINLLR_NF11, 0x21 },
{ STV090x_GAINLLR_NF12, 0x0D },
{ STV090x_GAINLLR_NF13, 0x0F },
{ STV090x_GAINLLR_NF14, 0x13 },
{ STV090x_GAINLLR_NF15, 0x1A },
{ STV090x_GAINLLR_NF16, 0x1F },
{ STV090x_GAINLLR_NF17, 0x21 },
{ STV090x_RCCFGH, 0x20 },
{ STV090x_P1_FECM, 0x01 }, /*disable the DSS mode */
{ STV090x_P1_PRVIT, 0x2f } /*disable puncture rate 6/7*/
};
/* Register overrides applied on top of stv0900_initval for silicon
 * cut 2.0 parts (both paths P1 and P2). */
static struct stv090x_reg stv0900_cut20_val[] = {
{ STV090x_P2_DMDCFG3, 0xe8 },
{ STV090x_P2_DMDCFG4, 0x10 },
{ STV090x_P2_CARFREQ, 0x38 },
{ STV090x_P2_CARHDR, 0x20 },
{ STV090x_P2_KREFTMG, 0x5a },
{ STV090x_P2_SMAPCOEF7, 0x06 },
{ STV090x_P2_SMAPCOEF6, 0x00 },
{ STV090x_P2_SMAPCOEF5, 0x04 },
{ STV090x_P2_NOSCFG, 0x0c },
{ STV090x_P1_DMDCFG3, 0xe8 },
{ STV090x_P1_DMDCFG4, 0x10 },
{ STV090x_P1_CARFREQ, 0x38 },
{ STV090x_P1_CARHDR, 0x20 },
{ STV090x_P1_KREFTMG, 0x5a },
{ STV090x_P1_SMAPCOEF7, 0x06 },
{ STV090x_P1_SMAPCOEF6, 0x00 },
{ STV090x_P1_SMAPCOEF5, 0x04 },
{ STV090x_P1_NOSCFG, 0x0c },
{ STV090x_GAINLLR_NF4, 0x21 },
{ STV090x_GAINLLR_NF5, 0x21 },
{ STV090x_GAINLLR_NF6, 0x20 },
{ STV090x_GAINLLR_NF7, 0x1F },
{ STV090x_GAINLLR_NF8, 0x1E },
{ STV090x_GAINLLR_NF9, 0x1E },
{ STV090x_GAINLLR_NF10, 0x1D },
{ STV090x_GAINLLR_NF11, 0x1B },
{ STV090x_GAINLLR_NF12, 0x20 },
{ STV090x_GAINLLR_NF13, 0x20 },
{ STV090x_GAINLLR_NF14, 0x20 },
{ STV090x_GAINLLR_NF15, 0x20 },
{ STV090x_GAINLLR_NF16, 0x20 },
{ STV090x_GAINLLR_NF17, 0x21 },
};
/* Register overrides applied on top of stv0903_initval for silicon
 * cut 2.0 parts (single path, P1 only). */
static struct stv090x_reg stv0903_cut20_val[] = {
{ STV090x_P1_DMDCFG3, 0xe8 },
{ STV090x_P1_DMDCFG4, 0x10 },
{ STV090x_P1_CARFREQ, 0x38 },
{ STV090x_P1_CARHDR, 0x20 },
{ STV090x_P1_KREFTMG, 0x5a },
{ STV090x_P1_SMAPCOEF7, 0x06 },
{ STV090x_P1_SMAPCOEF6, 0x00 },
{ STV090x_P1_SMAPCOEF5, 0x04 },
{ STV090x_P1_NOSCFG, 0x0c },
{ STV090x_GAINLLR_NF4, 0x21 },
{ STV090x_GAINLLR_NF5, 0x21 },
{ STV090x_GAINLLR_NF6, 0x20 },
{ STV090x_GAINLLR_NF7, 0x1F },
{ STV090x_GAINLLR_NF8, 0x1E },
{ STV090x_GAINLLR_NF9, 0x1E },
{ STV090x_GAINLLR_NF10, 0x1D },
{ STV090x_GAINLLR_NF11, 0x1B },
{ STV090x_GAINLLR_NF12, 0x20 },
{ STV090x_GAINLLR_NF13, 0x20 },
{ STV090x_GAINLLR_NF14, 0x20 },
{ STV090x_GAINLLR_NF15, 0x20 },
{ STV090x_GAINLLR_NF16, 0x20 },
{ STV090x_GAINLLR_NF17, 0x21 }
};
/* Cut 2.0 Long Frame Tracking CR loop.
 * One row per DVB-S2 MODCOD; columns are loop coefficients for pilots
 * on/off at 2/5/10/20/30 Msps symbol rates. */
static struct stv090x_long_frame_crloop stv090x_s2_crl_cut20[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_12, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x1e },
{ STV090x_QPSK_35, 0x2f, 0x3f, 0x2e, 0x2f, 0x3d, 0x0f, 0x0e, 0x2e, 0x3d, 0x0e },
{ STV090x_QPSK_23, 0x2f, 0x3f, 0x2e, 0x2f, 0x0e, 0x0f, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_34, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_45, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_56, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_89, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_QPSK_910, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
{ STV090x_8PSK_35, 0x3c, 0x3e, 0x1c, 0x2e, 0x0c, 0x1e, 0x2b, 0x2d, 0x1b, 0x1d },
{ STV090x_8PSK_23, 0x1d, 0x3e, 0x3c, 0x2e, 0x2c, 0x1e, 0x0c, 0x2d, 0x2b, 0x1d },
{ STV090x_8PSK_34, 0x0e, 0x3e, 0x3d, 0x2e, 0x0d, 0x1e, 0x2c, 0x2d, 0x0c, 0x1d },
{ STV090x_8PSK_56, 0x2e, 0x3e, 0x1e, 0x2e, 0x2d, 0x1e, 0x3c, 0x2d, 0x2c, 0x1d },
{ STV090x_8PSK_89, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x0d, 0x2d, 0x3c, 0x1d },
{ STV090x_8PSK_910, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x1d, 0x2d, 0x0d, 0x1d }
};
/* Cut 3.0 Long Frame Tracking CR loop.
 * Same layout as the cut 2.0 table above. */
static struct stv090x_long_frame_crloop stv090x_s2_crl_cut30[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_12, 0x3c, 0x2c, 0x0c, 0x2c, 0x1b, 0x2c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_35, 0x0d, 0x0d, 0x0c, 0x0d, 0x1b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_23, 0x1d, 0x0d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_34, 0x1d, 0x1d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
{ STV090x_QPSK_45, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_QPSK_56, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_QPSK_89, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_QPSK_910, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
{ STV090x_8PSK_35, 0x39, 0x29, 0x39, 0x19, 0x19, 0x19, 0x19, 0x19, 0x09, 0x19 },
{ STV090x_8PSK_23, 0x2a, 0x39, 0x1a, 0x0a, 0x39, 0x0a, 0x29, 0x39, 0x29, 0x0a },
{ STV090x_8PSK_34, 0x2b, 0x3a, 0x1b, 0x1b, 0x3a, 0x1b, 0x1a, 0x0b, 0x1a, 0x3a },
{ STV090x_8PSK_56, 0x0c, 0x1b, 0x3b, 0x3b, 0x1b, 0x3b, 0x3a, 0x3b, 0x3a, 0x1b },
{ STV090x_8PSK_89, 0x0d, 0x3c, 0x2c, 0x2c, 0x2b, 0x0c, 0x0b, 0x3b, 0x0b, 0x1b },
{ STV090x_8PSK_910, 0x0d, 0x0d, 0x2c, 0x3c, 0x3b, 0x1c, 0x0b, 0x3b, 0x0b, 0x1b }
};
/* Cut 2.0 Long Frame Tracking CR Loop — 16APSK/32APSK MODCODs. */
static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut20[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_16APSK_23, 0x0c, 0x0c, 0x0c, 0x0c, 0x1d, 0x0c, 0x3c, 0x0c, 0x2c, 0x0c },
{ STV090x_16APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0c, 0x2d, 0x0c, 0x1d, 0x0c },
{ STV090x_16APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
{ STV090x_16APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
{ STV090x_16APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
{ STV090x_16APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
{ STV090x_32APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
{ STV090x_32APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c }
};
/* Cut 3.0 Long Frame Tracking CR Loop — 16APSK/32APSK MODCODs. */
static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut30[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_16APSK_23, 0x0a, 0x0a, 0x0a, 0x0a, 0x1a, 0x0a, 0x3a, 0x0a, 0x2a, 0x0a },
{ STV090x_16APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0b, 0x0a, 0x3b, 0x0a, 0x1b, 0x0a },
{ STV090x_16APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
{ STV090x_16APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
{ STV090x_16APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
{ STV090x_16APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
{ STV090x_32APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
{ STV090x_32APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a }
};
/* Cut 2.0 Long Frame Tracking CR Loop — low-rate QPSK MODCODs. */
static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut20[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_14, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x2d, 0x1f, 0x3d, 0x3e },
{ STV090x_QPSK_13, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x3d, 0x0f, 0x3d, 0x2e },
{ STV090x_QPSK_25, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x2e }
};
/* Cut 3.0 Long Frame Tracking CR Loop — low-rate QPSK MODCODs. */
static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut30[] = {
/* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
{ STV090x_QPSK_14, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x2a, 0x1c, 0x3a, 0x3b },
{ STV090x_QPSK_13, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x3a, 0x0c, 0x3a, 0x2b },
{ STV090x_QPSK_25, 0x1c, 0x3c, 0x1b, 0x3c, 0x3a, 0x1c, 0x3a, 0x3b, 0x3a, 0x2b }
};
/* Cut 2.0 Short Frame Tracking CR Loop.
 * One row per modulation; columns are 2/5/10/20/30 Msps symbol rates. */
static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut20[] = {
/* MODCOD 2M 5M 10M 20M 30M */
{ STV090x_QPSK, 0x2f, 0x2e, 0x0e, 0x0e, 0x3d },
{ STV090x_8PSK, 0x3e, 0x0e, 0x2d, 0x0d, 0x3c },
{ STV090x_16APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d },
{ STV090x_32APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d }
};
/* Cut 3.0 Short Frame Tracking CR Loop.
 * Same layout as the cut 2.0 short-frame table above. */
static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut30[] = {
/* MODCOD 2M 5M 10M 20M 30M */
{ STV090x_QPSK, 0x2C, 0x2B, 0x0B, 0x0B, 0x3A },
{ STV090x_8PSK, 0x3B, 0x0B, 0x2A, 0x0A, 0x39 },
{ STV090x_16APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A },
{ STV090x_32APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A }
};
/*
 * Sign-extend a two's-complement value of __width bits held in an s32.
 * A full-width (32-bit) value is already in native form and is returned
 * unchanged; otherwise, if the value's sign bit is set, subtract 2^__width
 * to map it onto the negative range.
 */
static inline s32 comp2(s32 __x, s32 __width)
{
	s32 sign_bit;

	if (__width == 32)
		return __x;

	sign_bit = 1 << (__width - 1);
	if (__x >= sign_bit)
		__x -= 1 << __width;

	return __x;
}
/*
 * Read one 8-bit demodulator register over I2C.
 *
 * Sends the 16-bit register address MSB first, then reads one data byte
 * back in a combined write/read transfer.
 *
 * Returns the non-negative register value on success, a negative errno
 * (or -EREMOTEIO for a short transfer) on failure.
 */
static int stv090x_read_reg(struct stv090x_state *state, unsigned int reg)
{
	const struct stv090x_config *config = state->config;
	int ret;
	u8 b0[] = { reg >> 8, reg & 0xff };	/* register address, big-endian */
	u8 buf;
	struct i2c_msg msg[] = {
		{ .addr = config->address, .flags = 0, .buf = b0, .len = 2 },
		{ .addr = config->address, .flags = I2C_M_RD, .buf = &buf, .len = 1 }
	};

	ret = i2c_transfer(state->i2c, msg, 2);
	if (ret != 2) {
		/* -ERESTARTSYS only means we were interrupted; don't log it */
		if (ret != -ERESTARTSYS)
			dprintk(FE_ERROR, 1,
				"Read error, Reg=[0x%02x], Status=%d",
				reg, ret);
		return ret < 0 ? ret : -EREMOTEIO;
	}
	if (unlikely(*state->verbose >= FE_DEBUGREG))
		dprintk(FE_ERROR, 1, "Reg=[0x%02x], data=%02x",
			reg, buf);

	return (unsigned int) buf;
}
/*
 * Write a run of 'count' consecutive 8-bit registers starting at 'reg'
 * in a single I2C transfer (16-bit address MSB first, then the data).
 *
 * The original code used a variable-length array 'u8 buf[2 + count]',
 * which grows the kernel stack by an attacker-/caller-controlled amount.
 * Use a fixed bounce buffer instead and reject oversized requests; all
 * in-driver callers write short register runs well below the limit.
 *
 * Returns 0 on success, -EINVAL for an oversized request, a negative
 * errno (or -EREMOTEIO for a short transfer) on I2C failure.
 */
static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 *data, u32 count)
{
	const struct stv090x_config *config = state->config;
	int ret;
	enum { MAX_XFER_SIZE = 64 };	/* payload bytes per transfer */
	u8 buf[2 + MAX_XFER_SIZE];
	struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count };

	if (count > MAX_XFER_SIZE) {
		dprintk(FE_ERROR, 1,
			"Write too large: Reg=[0x%04x], Count=%u", reg, count);
		return -EINVAL;
	}

	buf[0] = reg >> 8;	/* register address, big-endian */
	buf[1] = reg & 0xff;
	memcpy(&buf[2], data, count);

	if (unlikely(*state->verbose >= FE_DEBUGREG)) {
		int i;

		printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
		for (i = 0; i < count; i++)
			printk(" %02x", data[i]);
		printk("\n");
	}

	ret = i2c_transfer(state->i2c, &i2c_msg, 1);
	if (ret != 1) {
		if (ret != -ERESTARTSYS)
			dprintk(FE_ERROR, 1, "Reg=[0x%04x], Data=[0x%02x ...], Count=%u, Status=%d",
				reg, data[0], count, ret);
		return ret < 0 ? ret : -EREMOTEIO;
	}

	return 0;
}
/* Write a single 8-bit register; convenience wrapper around
 * stv090x_write_regs() with count = 1. Returns its status code. */
static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data)
{
	return stv090x_write_regs(state, reg, &data, 1);
}
/*
 * Open or close the demodulator's I2C repeater gate (I2CRPT.I2CT_ON) so
 * the tuner behind it becomes reachable (enable=1) or isolated again
 * (enable=0).  Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg = STV090x_READ_DEMOD(state, I2CRPT);

	if (enable)
		dprintk(FE_DEBUG, 1, "Enable Gate");
	else
		dprintk(FE_DEBUG, 1, "Disable Gate");

	STV090x_SETFIELD_Px(reg, I2CT_ON_FIELD, enable ? 1 : 0);
	if (STV090x_WRITE_DEMOD(state, I2CRPT, reg) < 0) {
		dprintk(FE_ERROR, 1, "I/O error");
		return -1;
	}

	return 0;
}
/*
 * Choose demodulator and FEC lock timeouts (milliseconds) based on the
 * search algorithm and symbol rate; lower symbol rates need longer to
 * lock.  Results are stored in state->DemodTimeout / state->FecTimeout.
 *
 * NOTE(review): the vendor comments in the blind-search branch claimed
 * 10-15 Msps ranges, but the code compares against 1.5/5 Msps; the
 * comments below were corrected to match the code.
 */
static void stv090x_get_lock_tmg(struct stv090x_state *state)
{
	switch (state->algo) {
	case STV090x_BLIND_SEARCH:
		dprintk(FE_DEBUG, 1, "Blind Search");
		if (state->srate <= 1500000) { /*SR <=1.5Msps*/
			state->DemodTimeout = 1500;
			state->FecTimeout = 400;
		} else if (state->srate <= 5000000) { /*1.5Msps < SR <=5Msps*/
			state->DemodTimeout = 1000;
			state->FecTimeout = 300;
		} else { /*SR >5Msps*/
			state->DemodTimeout = 700;
			state->FecTimeout = 100;
		}
		break;
	case STV090x_COLD_SEARCH:
	case STV090x_WARM_SEARCH:
	default:
		dprintk(FE_DEBUG, 1, "Normal Search");
		if (state->srate <= 1000000) { /*SR <=1Msps*/
			state->DemodTimeout = 4500;
			state->FecTimeout = 1700;
		} else if (state->srate <= 2000000) { /*1Msps < SR <= 2Msps */
			state->DemodTimeout = 2500;
			state->FecTimeout = 1100;
		} else if (state->srate <= 5000000) { /*2Msps < SR <= 5Msps */
			state->DemodTimeout = 1000;
			state->FecTimeout = 550;
		} else if (state->srate <= 10000000) { /*5Msps < SR <= 10Msps */
			state->DemodTimeout = 700;
			state->FecTimeout = 250;
		} else if (state->srate <= 20000000) { /*10Msps < SR <= 20Msps */
			state->DemodTimeout = 400;
			state->FecTimeout = 130;
		} else { /*SR >20Msps*/
			state->DemodTimeout = 300;
			state->FecTimeout = 100;
		}
		break;
	}
	/* a warm start already knows the carrier; halve the demod timeout */
	if (state->algo == STV090x_WARM_SEARCH)
		state->DemodTimeout /= 2;
}
/*
 * Program the initial symbol rate (SFRINIT1/0) as a 15-bit fraction of
 * the master clock: sym = srate * 2^16 / mclk.  The shift pairs below
 * pick a pre-scaling that keeps the 32-bit intermediate from
 * overflowing for each symbol-rate range.
 *
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_set_srate(struct stv090x_state *state, u32 srate)
{
	u32 sym;

	if (srate > 60000000)		/* SR * 2^16 / master_clk */
		sym = (srate << 4) / (state->mclk >> 12);
	else if (srate > 6000000)
		sym = (srate << 6) / (state->mclk >> 10);
	else
		sym = (srate << 9) / (state->mclk >> 7);

	if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0x7f) < 0) /* MSB */
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRINIT0, (sym & 0xff)) < 0) /* LSB */
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Program the symbol-rate upper limit (SFRUP1/0) to srate + 5%,
 * expressed as a fraction of the master clock (same scaling scheme as
 * stv090x_set_srate).  Values that would exceed the 15-bit register
 * range are clamped to the maximum 0x7fff.
 *
 * NOTE(review): the 'clk' parameter is unused -- the computation reads
 * state->mclk directly; kept for signature compatibility with callers.
 *
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_set_max_srate(struct stv090x_state *state, u32 clk, u32 srate)
{
	u32 sym;

	srate = 105 * (srate / 100);	/* +5% margin */
	if (srate > 60000000) {
		sym = (srate << 4); /* SR * 2^16 / master_clk */
		sym /= (state->mclk >> 12);
	} else if (srate > 6000000) {
		sym = (srate << 6);
		sym /= (state->mclk >> 10);
	} else {
		sym = (srate << 9);
		sym /= (state->mclk >> 7);
	}

	if (sym < 0x7fff) {
		if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0) /* MSB */
			goto err;
		if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0) /* LSB */
			goto err;
	} else {
		/* clamp to the register maximum */
		if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x7f) < 0) /* MSB */
			goto err;
		if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xff) < 0) /* LSB */
			goto err;
	}

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Program the symbol-rate lower limit (SFRLOW1/0) to srate - 5%,
 * expressed as a fraction of the master clock (same scaling scheme as
 * stv090x_set_srate).
 *
 * NOTE(review): 'clk' is unused here too -- state->mclk is read
 * directly; kept for signature symmetry with stv090x_set_max_srate.
 *
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_set_min_srate(struct stv090x_state *state, u32 clk, u32 srate)
{
	u32 sym;

	srate = 95 * (srate / 100);	/* -5% margin */
	if (srate > 60000000) {
		sym = (srate << 4); /* SR * 2^16 / master_clk */
		sym /= (state->mclk >> 12);
	} else if (srate > 6000000) {
		sym = (srate << 6);
		sym /= (state->mclk >> 10);
	} else {
		sym = (srate << 9);
		sym /= (state->mclk >> 7);
	}

	if (STV090x_WRITE_DEMOD(state, SFRLOW1, ((sym >> 8) & 0xff)) < 0) /* MSB */
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRLOW0, (sym & 0xff)) < 0) /* LSB */
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Occupied carrier bandwidth for a given symbol rate and roll-off
 * factor: srate * (1 + alpha), with alpha in percent.  Unknown roll-off
 * values fall back to the most conservative 35%.
 */
static u32 stv090x_car_width(u32 srate, enum stv090x_rolloff rolloff)
{
	u32 ro;

	if (rolloff == STV090x_RO_20)
		ro = 20;
	else if (rolloff == STV090x_RO_25)
		ro = 25;
	else	/* STV090x_RO_35 and anything unexpected */
		ro = 35;

	return srate + (srate * ro) / 100;
}
/*
 * Load the Viterbi decoder lock thresholds used during acquisition;
 * VTHxy holds the threshold for puncture rate x/y.  Returns 0 on
 * success, -1 on I2C failure.
 */
static int stv090x_set_vit_thacq(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, VTH12, 0x96) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH23, 0x64) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH34, 0x36) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH56, 0x23) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH67, 0x1e) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH78, 0x19) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Load the (more tolerant) Viterbi lock thresholds used while tracking;
 * same registers as stv090x_set_vit_thacq with higher values.
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_set_vit_thtracq(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, VTH12, 0xd0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH23, 0x7d) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH34, 0x53) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH56, 0x2f) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH67, 0x24) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH78, 0x1f) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Configure the FEC mode register (FECM) and the set of puncture rates
 * the Viterbi decoder may try (PRVIT bit mask) according to the search
 * standard and, for DVB-S1/DSS, the requested code rate.
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_set_viterbi(struct stv090x_state *state)
{
	switch (state->search_mode) {
	case STV090x_SEARCH_AUTO:
		if (STV090x_WRITE_DEMOD(state, FECM, 0x10) < 0) /* DVB-S and DVB-S2 */
			goto err;
		if (STV090x_WRITE_DEMOD(state, PRVIT, 0x3f) < 0) /* all puncture rate */
			goto err;
		break;
	case STV090x_SEARCH_DVBS1:
		if (STV090x_WRITE_DEMOD(state, FECM, 0x00) < 0) /* disable DSS */
			goto err;
		/* each PRVIT bit enables one puncture rate */
		switch (state->fec) {
		case STV090x_PR12:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
				goto err;
			break;
		case STV090x_PR23:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
				goto err;
			break;
		case STV090x_PR34:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x04) < 0)
				goto err;
			break;
		case STV090x_PR56:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x08) < 0)
				goto err;
			break;
		case STV090x_PR78:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x20) < 0)
				goto err;
			break;
		default:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x2f) < 0) /* all */
				goto err;
			break;
		}
		break;
	case STV090x_SEARCH_DSS:
		if (STV090x_WRITE_DEMOD(state, FECM, 0x80) < 0)
			goto err;
		/* DSS only defines rates 1/2, 2/3 and 6/7 */
		switch (state->fec) {
		case STV090x_PR12:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
				goto err;
			break;
		case STV090x_PR23:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
				goto err;
			break;
		case STV090x_PR67:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x10) < 0)
				goto err;
			break;
		default:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x13) < 0) /* 1/2, 2/3, 6/7 */
				goto err;
			break;
		}
		break;
	default:
		/* DVB-S2-only searches don't use the Viterbi decoder */
		break;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Write 0xff into every MODCOD list register (MODCODLST0..F) --
 * comparing with stv090x_activate_modcod below, this masks out all
 * DVB-S2 MODCODs so the frame-header search accepts none of them.
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_stop_modcod(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xff) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Program the MODCOD filter for dual-demodulator operation: the 0xcc
 * pattern (vendor-specified) enables the standard set of DVB-S2
 * MODCODs during the search.  Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_activate_modcod(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xfc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Program the MODCOD filter for single-demodulator operation: per the
 * caller's comment this authorizes SHORT and LONG frames for QPSK,
 * 8PSK, 16APSK and 32APSK (0x00 pattern = nothing masked).
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_activate_modcod_single(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xf0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0x0f) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Gate the Viterbi decoder clock of this path's demodulator via the
 * shared STOPCLK2 register (protected by demod_lock since both paths
 * touch the same register).
 *
 * NOTE(review): 'enable' is written straight into STOP_CLKVITx, so
 * callers pass 1 to STOP the Viterbi clock and 0 to let it run -- see
 * stv090x_delivery_search, which passes 0 to activate the decoder.
 *
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_vitclk_ctl(struct stv090x_state *state, int enable)
{
	u32 reg;

	switch (state->demod) {
	case STV090x_DEMODULATOR_0:
		mutex_lock(&demod_lock);
		reg = stv090x_read_reg(state, STV090x_STOPCLK2);
		STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, enable);
		if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
			goto err;
		mutex_unlock(&demod_lock);
		break;
	case STV090x_DEMODULATOR_1:
		mutex_lock(&demod_lock);
		reg = stv090x_read_reg(state, STV090x_STOPCLK2);
		STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, enable);
		if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
			goto err;
		mutex_unlock(&demod_lock);
		break;
	default:
		dprintk(FE_ERROR, 1, "Wrong demodulator!");
		break;
	}
	return 0;
err:
	/* err is only reached with demod_lock held */
	mutex_unlock(&demod_lock);
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
static int stv090x_dvbs_track_crl(struct stv090x_state *state)
{
if (state->dev_ver >= 0x30) {
/* Set ACLC BCLC optimised value vs SR */
if (state->srate >= 15000000) {
if (STV090x_WRITE_DEMOD(state, ACLC, 0x2b) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x1a) < 0)
goto err;
} else if ((state->srate >= 7000000) && (15000000 > state->srate)) {
if (STV090x_WRITE_DEMOD(state, ACLC, 0x0c) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x1b) < 0)
goto err;
} else if (state->srate < 7000000) {
if (STV090x_WRITE_DEMOD(state, ACLC, 0x2c) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x1c) < 0)
goto err;
}
} else {
/* Cut 2.0 */
if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
goto err;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
/*
 * Configure the demodulator for the requested delivery system before a
 * search: enable the DVB-S1 and/or DVB-S2 paths (DMDCFGMD), gate the
 * Viterbi clock, set the carrier-loop registers and the MODCOD filter.
 * Returns 0 on success, -1 on I2C failure.
 */
static int stv090x_delivery_search(struct stv090x_state *state)
{
	u32 reg;

	switch (state->search_mode) {
	case STV090x_SEARCH_DVBS1:
	case STV090x_SEARCH_DSS:
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		/* Activate Viterbi decoder in legacy search,
		 * do not use FRESVIT1, might impact VITERBI2
		 */
		if (stv090x_vitclk_ctl(state, 0) < 0)
			goto err;
		if (stv090x_dvbs_track_crl(state) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x22) < 0) /* disable DVB-S2 */
			goto err;
		if (stv090x_set_vit_thacq(state) < 0)
			goto err;
		if (stv090x_set_viterbi(state) < 0)
			goto err;
		break;
	case STV090x_SEARCH_DVBS2:
		/* toggle both enables off then on to reset the S1/S2 paths */
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		/* stop the Viterbi clock -- not used in DVB-S2-only mode */
		if (stv090x_vitclk_ctl(state, 1) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0) /* stop DVB-S CR loop */
			goto err;
		if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
			goto err;
		if (state->dev_ver <= 0x20) {
			/* enable S2 carrier loop */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
				goto err;
		} else {
			/* > Cut 3: Stop carrier 3 */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
				goto err;
		}
		if (state->demod_mode != STV090x_SINGLE) {
			/* Cut 2: enable link during search */
			if (stv090x_activate_modcod(state) < 0)
				goto err;
		} else {
			/* Single demodulator
			 * Authorize SHORT and LONG frames,
			 * QPSK, 8PSK, 16APSK and 32APSK
			 */
			if (stv090x_activate_modcod_single(state) < 0)
				goto err;
		}
		break;
	case STV090x_SEARCH_AUTO:
	default:
		/* enable DVB-S1 and DVB-S2 in Auto MODE */
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		if (stv090x_vitclk_ctl(state, 0) < 0)
			goto err;
		if (stv090x_dvbs_track_crl(state) < 0)
			goto err;
		if (state->dev_ver <= 0x20) {
			/* enable S2 carrier loop */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
				goto err;
		} else {
			/* > Cut 3: Stop carrier 3 */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
				goto err;
		}
		if (state->demod_mode != STV090x_SINGLE) {
			/* Cut 2: enable link during search */
			if (stv090x_activate_modcod(state) < 0)
				goto err;
		} else {
			/* Single demodulator
			 * Authorize SHORT and LONG frames,
			 * QPSK, 8PSK, 16APSK and 32APSK
			 */
			if (stv090x_activate_modcod_single(state) < 0)
				goto err;
		}
		if (state->srate >= 2000000) {
			/* Srate >= 2MSPS, Viterbi threshold to acquire */
			if (stv090x_set_vit_thacq(state) < 0)
				goto err;
		} else {
			/* Srate < 2MSPS, Reset Viterbi threshold to track
			 * and then re-acquire
			 */
			if (stv090x_set_vit_thtracq(state) < 0)
				goto err;
		}
		if (stv090x_set_viterbi(state) < 0)
			goto err;
		break;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
static int stv090x_start_search(struct stv090x_state *state)
{
u32 reg, freq_abs;
s16 freq;
/* Reset demodulator */
reg = STV090x_READ_DEMOD(state, DMDISTATE);
STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f);
if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
goto err;
if (state->dev_ver <= 0x20) {
if (state->srate <= 5000000) {
if (STV090x_WRITE_DEMOD(state, CARCFG, 0x44) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRUP1, 0x0f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRUP1, 0xff) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRLOW1, 0xf0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRLOW0, 0x00) < 0)
goto err;
/*enlarge the timing bandwith for Low SR*/
if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0)
goto err;
} else {
/* If the symbol rate is >5 Msps
Set The carrier search up and low to auto mode */
if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
goto err;
/*reduce the timing bandwith for high SR*/
if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
goto err;
}
} else {
/* >= Cut 3 */
if (state->srate <= 5000000) {
/* enlarge the timing bandwith for Low SR */
STV090x_WRITE_DEMOD(state, RTCS2, 0x68);
} else {
/* reduce timing bandwith for high SR */
STV090x_WRITE_DEMOD(state, RTCS2, 0x44);
}
/* Set CFR min and max to manual mode */
STV090x_WRITE_DEMOD(state, CARCFG, 0x46);
if (state->algo == STV090x_WARM_SEARCH) {
/* WARM Start
* CFR min = -1MHz,
* CFR max = +1MHz
*/
freq_abs = 1000 << 16;
freq_abs /= (state->mclk / 1000);
freq = (s16) freq_abs;
} else {
/* COLD Start
* CFR min =- (SearchRange / 2 + 600KHz)
* CFR max = +(SearchRange / 2 + 600KHz)
* (600KHz for the tuner step size)
*/
freq_abs = (state->search_range / 2000) + 600;
freq_abs = freq_abs << 16;
freq_abs /= (state->mclk / 1000);
freq = (s16) freq_abs;
}
if (STV090x_WRITE_DEMOD(state, CFRUP1, MSB(freq)) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRUP1, LSB(freq)) < 0)
goto err;
freq *= -1;
if (STV090x_WRITE_DEMOD(state, CFRLOW1, MSB(freq)) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRLOW0, LSB(freq)) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0) < 0)
goto err;
if (state->dev_ver >= 0x20) {
if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
goto err;
if ((state->search_mode == STV090x_DVBS1) ||
(state->search_mode == STV090x_DSS) ||
(state->search_mode == STV090x_SEARCH_AUTO)) {
if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0)
goto err;
}
}
if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xe0) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xc0) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFGMD);
STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0);
STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0);
if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
goto err;
reg = STV090x_READ_DEMOD(state, DMDCFG2);
STV090x_SETFIELD_Px(reg, S1S2_SEQUENTIAL_FIELD, 0x0);
if (STV090x_WRITE_DEMOD(state, DMDCFG2, reg) < 0)
goto err;
if (state->dev_ver >= 0x20) {
/*Frequency offset detector setting*/
if (state->srate < 2000000) {
if (state->dev_ver <= 0x20) {
/* Cut 2 */
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x39) < 0)
goto err;
} else {
/* Cut 2 */
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x89) < 0)
goto err;
}
if (STV090x_WRITE_DEMOD(state, CARHDR, 0x40) < 0)
goto err;
}
if (state->srate < 10000000) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4c) < 0)
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4b) < 0)
goto err;
}
} else {
if (state->srate < 10000000) {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xef) < 0)
goto err;
} else {
if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xed) < 0)
goto err;
}
}
switch (state->algo) {
case STV090x_WARM_SEARCH:
/* The symbol rate and the exact
* carrier Frequency are known
*/
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
goto err;
break;
case STV090x_COLD_SEARCH:
/* The symbol rate is known */
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
goto err;
if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
goto err;
break;
default:
break;
}
return 0;
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
}
/*
 * Sweep the carrier offset across the search range in 1 MHz steps and
 * return the minimum averaged AGC2 value seen -- used by blind search
 * as a "is there any signal at all" estimate.
 *
 * Fixes vs. the vendor drop:
 *  - agc2_min was reset to 0xffff INSIDE the loop, so the function
 *    returned the last step's value, never the minimum
 *  - the agc2 accumulator was never cleared between steps and the low
 *    byte was OR'd onto the running sum instead of added per sample
 *  - 'dir' was set to -1 unconditionally instead of alternating, so
 *    only the first step went upward (the sibling coarse search uses
 *    'dir *= -1')
 *
 * Returns the minimum AGC2 level on success, -1 on I2C failure.
 */
static int stv090x_get_agc2_min_level(struct stv090x_state *state)
{
	u32 agc2_min = 0xffff, agc2 = 0, freq_init, freq_step, reg;
	s32 i, j, steps, dir;

	if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
		goto err;
	reg = STV090x_READ_DEMOD(state, DMDCFGMD);
	STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 1);
	STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 1);
	if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
		goto err;

	if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0) /* SR = 65 Msps Max */
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0) /* SR= 400 ksps Min */
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0) /* stop acq @ coarse carrier state */
		goto err;
	if (stv090x_set_srate(state, 1000000) < 0)
		goto err;

	/* odd number of 1 MHz steps covering the search range */
	steps = -1 + state->search_range / 1000000;
	steps /= 2;
	steps = (2 * steps) + 1;
	if (steps < 0)
		steps = 1;

	dir = 1;
	freq_step = (1000000 * 256) / (state->mclk / 256);
	freq_init = 0;

	for (i = 0; i < steps; i++) {
		/* alternate above/below the centre frequency */
		if (dir > 0)
			freq_init = freq_init + (freq_step * i);
		else
			freq_init = freq_init - (freq_step * i);
		dir *= -1;

		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod RESET */
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_init >> 8) & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_init & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x58) < 0) /* Demod RESET */
			goto err;
		msleep(10);

		/* average ten fresh AGC2 samples for this step */
		agc2 = 0;
		for (j = 0; j < 10; j++) {
			agc2 += (STV090x_READ_DEMOD(state, AGC2I1) << 8) |
				STV090x_READ_DEMOD(state, AGC2I0);
		}
		agc2 /= 10;
		if (agc2 < agc2_min)
			agc2_min = agc2;
	}

	return agc2_min;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Read back the demodulator's symbol-rate estimate (SFR3..SFR0, a
 * fraction of the master clock) and convert it to symbols/second:
 * effectively (clk * SFR) >> 32, computed with 16x16-bit partial
 * products to avoid overflowing 32-bit arithmetic.
 */
static u32 stv090x_get_srate(struct stv090x_state *state, u32 clk)
{
	u8 r3, r2, r1, r0;
	s32 srate, int_1, int_2, tmp_1, tmp_2;

	r3 = STV090x_READ_DEMOD(state, SFR3);
	r2 = STV090x_READ_DEMOD(state, SFR2);
	r1 = STV090x_READ_DEMOD(state, SFR1);
	r0 = STV090x_READ_DEMOD(state, SFR0);

	srate = ((r3 << 24) | (r2 << 16) | (r1 << 8) | r0);

	/* split both operands into 16-bit halves */
	int_1 = clk >> 16;
	int_2 = srate >> 16;
	tmp_1 = clk % 0x10000;
	tmp_2 = srate % 0x10000;

	srate = (int_1 * int_2) +
		((int_1 * tmp_2) >> 16) +
		((int_2 * tmp_1) >> 16);

	return srate;
}
/*
 * Blind-search coarse stage: step the tuner across the search range and
 * let the demodulator's auto-scan look for timing lock.  Returns the
 * coarse symbol-rate estimate on lock, 0 when nothing was found, and
 * (u32)-1 on I/O error (callers only test for != 0).
 *
 * Fixes vs. the vendor drop:
 *  - 'state->config->tuner_set_frequency(fe, state->frequency)' always
 *    re-tuned to the centre frequency, making the stepped scan a no-op;
 *    it must be passed the stepped 'freq'
 *  - 'tuner_get_status(fe, ®)' was a mangled '&reg' (corrupted
 *    HTML entity) and did not compile
 */
static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
{
	struct dvb_frontend *fe = &state->frontend;
	int tmg_lock = 0, i;
	s32 tmg_cpt = 0, dir = 1, steps, cur_step = 0, freq;
	u32 srate_coarse = 0, agc2 = 0, car_step = 1200, reg;

	/* hold the demodulator in reset while reprogramming */
	reg = STV090x_READ_DEMOD(state, DMDISTATE);
	STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f); /* Demod RESET */
	if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, TMGCFG, 0x12) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xf0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xe0) < 0)
		goto err;
	reg = STV090x_READ_DEMOD(state, DMDCFGMD);
	STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 1);
	STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 1);
	if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
		goto err;

	/* symbol-rate scan window: 400 ksps .. 65 Msps */
	if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x60) < 0)
		goto err;

	if (state->dev_ver >= 0x30) {
		if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x99) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x95) < 0)
			goto err;
	} else if (state->dev_ver >= 0x20) {
		if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x6a) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x95) < 0)
			goto err;
	}

	/* tuner step size (kHz) grows with the expected symbol rate */
	if (state->srate <= 2000000)
		car_step = 1000;
	else if (state->srate <= 5000000)
		car_step = 2000;
	else if (state->srate <= 12000000)
		car_step = 3000;
	else
		car_step = 5000;

	/* odd number of tuner steps covering the search range, capped at 11 */
	steps = -1 + ((state->search_range / 1000) / car_step);
	steps /= 2;
	steps = (2 * steps) + 1;
	if (steps < 0)
		steps = 1;
	else if (steps > 10) {
		steps = 11;
		car_step = (state->search_range / 1000) / 10;
	}
	cur_step = 0;
	dir = 1;
	freq = state->frequency;

	while ((!tmg_lock) && (cur_step < steps)) {
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5f) < 0) /* Demod RESET */
			goto err;
		reg = STV090x_READ_DEMOD(state, DMDISTATE);
		STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x00); /* trigger acquisition */
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
			goto err;
		msleep(50);
		for (i = 0; i < 10; i++) {
			reg = STV090x_READ_DEMOD(state, DSTATUS);
			if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
				tmg_cpt++;
			agc2 += STV090x_READ_DEMOD(state, AGC2I1) << 8;
			agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
		}
		agc2 /= 10;
		srate_coarse = stv090x_get_srate(state, state->mclk);
		cur_step++;
		dir *= -1;

		if ((tmg_cpt >= 5) && (agc2 < 0x1f00) && (srate_coarse < 55000000) && (srate_coarse > 850000))
			tmg_lock = 1;
		else if (cur_step < steps) {
			/* zig-zag around the centre frequency */
			if (dir > 0)
				freq += cur_step * car_step;
			else
				freq -= cur_step * car_step;

			/* Setup tuner */
			if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
				goto err;
			if (state->config->tuner_set_frequency) {
				/* tune to the stepped frequency, not the centre */
				if (state->config->tuner_set_frequency(fe, freq) < 0)
					goto err;
			}
			if (state->config->tuner_set_bandwidth) {
				if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
					goto err;
			}
			if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
				goto err;
			msleep(50);
			if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
				goto err;
			if (state->config->tuner_get_status) {
				if (state->config->tuner_get_status(fe, &reg) < 0)
					goto err;
			}
			if (reg)
				dprintk(FE_DEBUG, 1, "Tuner phase locked");
			else
				dprintk(FE_DEBUG, 1, "Tuner unlocked");
			if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
				goto err;
		}
	}
	if (!tmg_lock)
		srate_coarse = 0;
	else
		srate_coarse = stv090x_get_srate(state, state->mclk);

	return srate_coarse;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Blind-search fine stage: take the coarse symbol-rate estimate,
 * program a +/-30% symbol-rate window around it and retrigger
 * acquisition at the coarse carrier offset.
 *
 * Returns the coarse estimate on success, 0 when the estimate is below
 * the requested minimum symbol rate.  NOTE(review): the err path
 * returns -1 through a u32 (0xffffffff), which callers that only test
 * '!= 0' will treat as success -- matches the sibling coarse search.
 */
static u32 stv090x_srate_srch_fine(struct stv090x_state *state)
{
	u32 srate_coarse, freq_coarse, sym, reg;

	srate_coarse = stv090x_get_srate(state, state->mclk);
	freq_coarse = STV090x_READ_DEMOD(state, CFR2) << 8;
	freq_coarse |= STV090x_READ_DEMOD(state, CFR1);
	sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */

	if (sym < state->srate)
		srate_coarse = 0;
	else {
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0) /* Demod RESET */
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0x01) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
			goto err;
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;

		if (state->dev_ver >= 0x30) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x79) < 0)
				goto err;
		} else if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
				goto err;
		}

		/* scale SFRUP/SFRLOW/SFRINIT by 2^16/mclk; divide by 1000
		 * (or 100 for low rates, to keep precision) to avoid
		 * overflowing 32-bit intermediates */
		if (srate_coarse > 3000000) {
			sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
			sym = (sym / 1000) * 65536;
			sym /= (state->mclk / 1000);
			if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
				goto err;
			sym = 10 * (srate_coarse / 13); /* SFRLOW = SFR - 30% */
			sym = (sym / 1000) * 65536;
			sym /= (state->mclk / 1000);
			if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
				goto err;
			sym = (srate_coarse / 1000) * 65536;
			sym /= (state->mclk / 1000);
			if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
				goto err;
		} else {
			sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
			sym = (sym / 100) * 65536;
			sym /= (state->mclk / 100);
			if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
				goto err;
			sym = 10 * (srate_coarse / 14); /* SFRLOW = SFR - 30% */
			sym = (sym / 100) * 65536;
			sym /= (state->mclk / 100);
			if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
				goto err;
			sym = (srate_coarse / 100) * 65536;
			sym /= (state->mclk / 100);
			if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
				goto err;
		}
		if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
			goto err;
		/* restart acquisition at the coarse carrier offset */
		if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_coarse >> 8) & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_coarse & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0) /* trigger acquisition */
			goto err;
	}

	return srate_coarse;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Poll the demodulator for lock for up to 'timeout' milliseconds in
 * 10 ms steps.  HEADER_MODE 2 (DVB-S2) or 3 (DVB-S1/legacy) means a
 * frame header was found; only then is the definitive-lock bit in
 * DSTATUS meaningful.  Returns the lock flag (0 or 1).
 */
static int stv090x_get_dmdlock(struct stv090x_state *state, s32 timeout)
{
	s32 elapsed = 0, lock = 0;
	u32 reg;
	u8 hdr_mode;

	while (elapsed < timeout && !lock) {
		reg = STV090x_READ_DEMOD(state, DMDSTATE);
		hdr_mode = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);

		if (hdr_mode == 2 || hdr_mode == 3) {
			/* 2: DVB-S2 mode, 3: DVB-S1/legacy mode */
			reg = STV090x_READ_DEMOD(state, DSTATUS);
			lock = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
		} else {
			/* 0: searching, 1: first PLH detected */
			dprintk(FE_DEBUG, 1, "Demodulator searching ..");
			lock = 0;
		}

		if (!lock)
			msleep(10);
		else
			dprintk(FE_DEBUG, 1, "Demodulator acquired LOCK");

		elapsed += 10;
	}
	return lock;
}
/*
 * Blind search: probe the AGC2 floor first; if a signal seems present,
 * run coarse + fine symbol-rate searches with a decreasing timing
 * constant (KREFTMG) until lock, the K range is exhausted, or the
 * coarse stage clearly fails.  Returns the lock flag, or -1 on I/O
 * error.
 *
 * Fix vs. the vendor drop: 'coarse_fail' was read in the do/while
 * condition without ever being initialized (undefined behaviour); it
 * must start at 0.
 */
static int stv090x_blind_search(struct stv090x_state *state)
{
	u32 agc2, reg, srate_coarse;
	s32 timeout_dmd = 500, cpt_fail, agc2_ovflw, i;
	u8 k_ref, k_max, k_min;
	int coarse_fail = 0;	/* was uninitialized */
	int lock;

	k_max = 120;
	k_min = 30;

	agc2 = stv090x_get_agc2_min_level(state);

	if (agc2 > STV090x_SEARCH_AGC2_TH(state->dev_ver)) {
		/* AGC2 floor too high: no carrier anywhere in the range */
		lock = 0;
	} else {
		if (state->dev_ver <= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
				goto err;
		} else {
			/* > Cut 3 */
			if (STV090x_WRITE_DEMOD(state, CARCFG, 0x06) < 0)
				goto err;
		}

		if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
			goto err;

		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0) /* set viterbi hysteresis */
				goto err;
		}

		k_ref = k_max;
		do {
			if (STV090x_WRITE_DEMOD(state, KREFTMG, k_ref) < 0)
				goto err;
			if (stv090x_srate_srch_coarse(state) != 0) {
				srate_coarse = stv090x_srate_srch_fine(state);
				if (srate_coarse != 0) {
					stv090x_get_lock_tmg(state);
					lock = stv090x_get_dmdlock(state, timeout_dmd);
				} else {
					lock = 0;
				}
			} else {
				/* coarse stage found nothing: count carrier
				 * overflows/delocks and AGC2 saturation to
				 * decide whether to give up entirely */
				cpt_fail = 0;
				agc2_ovflw = 0;
				for (i = 0; i < 10; i++) {
					agc2 = STV090x_READ_DEMOD(state, AGC2I1) << 8;
					agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
					if (agc2 >= 0xff00)
						agc2_ovflw++;
					reg = STV090x_READ_DEMOD(state, DSTATUS2);
					if ((STV090x_GETFIELD_Px(reg, CFR_OVERFLOW_FIELD) == 0x01) &&
					    (STV090x_GETFIELD_Px(reg, DEMOD_DELOCK_FIELD) == 0x01))
						cpt_fail++;
				}
				if ((cpt_fail > 7) || (agc2_ovflw > 7))
					coarse_fail = 1;

				lock = 0;
			}
			k_ref -= 30;
		} while ((k_ref >= k_min) && (!lock) && (!coarse_fail));
	}

	return lock;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Quick timing-lock probe: temporarily disable the carrier offset scan,
 * zero the carrier offset, trigger an acquisition and sample the timing
 * lock quality 10 times; then restore the saved register state.
 *
 * Returns 1 when timing lock quality was good at least 3 times,
 * 0 otherwise, -1 on I/O error.
 */
static int stv090x_chk_tmg(struct stv090x_state *state)
{
	u32 reg;
	s32 tmg_cpt = 0, i;
	u8 freq, tmg_thh, tmg_thl;
	/*
	 * Fix: tmg_lock was returned uninitialized whenever fewer than 3
	 * good samples were seen; it must default to "no lock".
	 */
	int tmg_lock = 0;

	/* save registers that are clobbered by the probe */
	freq = STV090x_READ_DEMOD(state, CARFREQ);
	tmg_thh = STV090x_READ_DEMOD(state, TMGTHRISE);
	tmg_thl = STV090x_READ_DEMOD(state, TMGTHFALL);

	if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
		goto err;

	reg = STV090x_READ_DEMOD(state, DMDCFGMD);
	STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00); /* stop carrier offset search */
	if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
		goto err;

	if (STV090x_WRITE_DEMOD(state, RTC, 0x80) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, RTCS2, 0x40) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0) /* set car ofset to 0 */
		goto err;
	if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x65) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0) /* trigger acquisition */
		goto err;
	msleep(10);

	/* sample timing lock quality over ~10ms */
	for (i = 0; i < 10; i++) {
		reg = STV090x_READ_DEMOD(state, DSTATUS);
		if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
			tmg_cpt++;
		msleep(1);
	}
	if (tmg_cpt >= 3)
		tmg_lock = 1;

	/* restore acquisition state */
	if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, RTC, 0x88) < 0) /* DVB-S1 timing */
		goto err;
	if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0) /* DVB-S2 timing */
		goto err;
	if (STV090x_WRITE_DEMOD(state, CARFREQ, freq) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, TMGTHRISE, tmg_thh) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, TMGTHFALL, tmg_thl) < 0)
		goto err;

	return	tmg_lock;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Cold-start lock: wait for a first demod lock; if that fails and the
 * symbol rate is high, retry after a timing check, otherwise zig-zag
 * the tuner frequency around the target in symbol-rate-dependent steps
 * and retry the acquisition at each step.
 *
 * Returns 1 on lock, 0 on no lock, -1 on I/O error.
 */
static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
{
	struct dvb_frontend *fe = &state->frontend;
	u32 reg;
	s32 car_step, steps, cur_step, dir, freq, timeout_lock;
	int lock = 0;

	if (state->srate >= 10000000)
		timeout_lock = timeout_dmd / 3;
	else
		timeout_lock = timeout_dmd / 2;

	lock = stv090x_get_dmdlock(state, timeout_lock); /* cold start wait */
	if (!lock) {
		if (state->srate >= 10000000) {
			/* high SR: one retry after confirming timing lock */
			if (stv090x_chk_tmg(state)) {
				if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
					goto err;
				lock = stv090x_get_dmdlock(state, timeout_dmd);
			} else {
				lock = 0;
			}
		} else {
			/* low SR: step the tuner around the target freq */
			if (state->srate <= 4000000)
				car_step = 1000;
			else if (state->srate <= 7000000)
				car_step = 2000;
			else if (state->srate <= 10000000)
				car_step = 3000;
			else
				car_step = 5000;

			steps  = (state->search_range / 1000) / car_step;
			steps /= 2;
			steps  = 2 * (steps + 1);
			if (steps < 0)
				steps = 2;
			else if (steps > 12)
				steps = 12;

			cur_step = 1;
			dir = 1;

			if (!lock) {
				freq = state->frequency;
				state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;

				while ((cur_step <= steps) && (!lock)) {
					if (dir > 0)
						freq += cur_step * car_step;
					else
						freq -= cur_step * car_step;

					/* Setup tuner */
					if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
						goto err;

					if (state->config->tuner_set_frequency) {
						/*
						 * Fix: tune to the stepped
						 * "freq", not the unchanged
						 * state->frequency, otherwise
						 * the zig-zag scan never moves.
						 */
						if (state->config->tuner_set_frequency(fe, freq) < 0)
							goto err;
					}

					if (state->config->tuner_set_bandwidth) {
						if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
							goto err;
					}

					if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
						goto err;

					msleep(50);

					if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
						goto err;

					if (state->config->tuner_get_status) {
						/* fix: "&reg" was garbled to a mojibake char */
						if (state->config->tuner_get_status(fe, &reg) < 0)
							goto err;
					}

					if (reg)
						dprintk(FE_DEBUG, 1, "Tuner phase locked");
					else
						dprintk(FE_DEBUG, 1, "Tuner unlocked");

					if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
						goto err;

					/* fix: check the write like every other demod access */
					if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c) < 0)
						goto err;
					if (state->delsys == STV090x_DVBS2) {
						/* pulse the S1/S2 enables to reset the S2 demod */
						reg = STV090x_READ_DEMOD(state, DMDCFGMD);
						STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
						STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
						if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
							goto err;
						STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
						STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
						if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
							goto err;
					}
					if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
						goto err;
					if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
						goto err;
					if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
						goto err;
					if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
						goto err;
					lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));

					dir *= -1;
					cur_step++;
				}
			}
		}
	}

	return lock;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Compute the software zig-zag search parameters:
 *   *freq_inc   - carrier frequency increment per step (demod units)
 *   *timeout_sw - per-step demod lock timeout (ms, srate-scaled)
 *   *steps      - maximum number of steps (capped at 100)
 * Always returns 0.
 */
static int stv090x_get_loop_params(struct stv090x_state *state, s32 *freq_inc, s32 *timeout_sw, s32 *steps)
{
	s32 timeout, inc, steps_max, srate, car_max;

	srate = state->srate;
	car_max = state->search_range / 1000;
	car_max += car_max / 10;		/* +10% margin */
	car_max = 65536 * (car_max / 2);
	car_max /= (state->mclk / 1000);

	if (car_max > 0x4000)
		car_max = 0x4000 ; /* maxcarrier should be<= +-1/4 Mclk */

	inc = srate;
	inc /= state->mclk / 1000;
	inc *= 256;
	inc *= 256;
	inc /= 1000;

	switch (state->search_mode) {
	case STV090x_SEARCH_DVBS1:
	case STV090x_SEARCH_DSS:
		inc *= 3; /* freq step = 3% of srate */
		timeout = 20;
		break;

	case STV090x_SEARCH_DVBS2:
		inc *= 4;
		timeout = 25;
		break;

	case STV090x_SEARCH_AUTO:
	default:
		inc *= 3;
		timeout = 25;
		break;
	}
	inc /= 100;

	/*
	 * Fix: use <= so that inc == 0 (possible for very low symbol
	 * rates after the integer divisions above) cannot reach the
	 * "car_max / inc" division below.
	 */
	if ((inc > car_max) || (inc <= 0))
		inc = car_max / 2; /* increment <= 1/8 Mclk */

	timeout *= 27500; /* 27.5 Msps reference */
	/* fix: srate in (0,1000) made "srate / 1000" zero -> div by zero */
	if (srate >= 1000)
		timeout /= (srate / 1000);

	if ((timeout > 100) || (timeout < 0))
		timeout = 100;

	steps_max = (car_max / inc) + 1; /* min steps = 3 */
	if ((steps_max > 100) || (steps_max < 0)) {
		steps_max = 100; /* max steps <= 100 */
		inc = car_max / steps_max;
	}
	*freq_inc = inc;
	*timeout_sw = timeout;
	*steps = steps_max;

	return 0;
}
/*
 * Decide whether any usable signal is present: a signal is declared
 * absent when AGC2 is above 0x2000 or the measured carrier offset lies
 * outside twice the (margin-extended, Mclk/4-capped) search range.
 * Returns 1 for "no signal", 0 when a signal was found.
 */
static int stv090x_chk_signal(struct stv090x_state *state)
{
	s32 carrier_offst, agc2_level, max_carrier;
	int no_signal;

	/* current carrier offset, sign-extended from 16 bits */
	carrier_offst  = STV090x_READ_DEMOD(state, CFR2) << 8;
	carrier_offst |= STV090x_READ_DEMOD(state, CFR1);
	carrier_offst  = comp2(carrier_offst, 16);

	agc2_level  = STV090x_READ_DEMOD(state, AGC2I1) << 8;
	agc2_level |= STV090x_READ_DEMOD(state, AGC2I0);

	/* allowed carrier range in demod units, +10% margin, <= Mclk/4 */
	max_carrier  = state->search_range / 1000;
	max_carrier += (max_carrier / 10); /* 10% margin */
	max_carrier  = (65536 * max_carrier / 2);
	max_carrier /= state->mclk / 1000;
	if (max_carrier > 0x4000)
		max_carrier = 0x4000;

	no_signal = (agc2_level > 0x2000) ||
		    (carrier_offst >  2 * max_carrier) ||
		    (carrier_offst < -2 * max_carrier);

	if (no_signal)
		dprintk(FE_DEBUG, 1, "No Signal");
	else
		dprintk(FE_DEBUG, 1, "Found Signal");

	return no_signal;
}
/*
 * One pass of the software carrier search: try successive carrier
 * offsets (zig-zag around zero, or a linear sweep from the lower band
 * edge), re-triggering an acquisition at each offset, until the demod
 * locks, no signal is detected, the offset leaves the search band, or
 * steps_max trials have been made.
 *
 * @inc:       carrier offset increment per step (demod units)
 * @timeout:   per-step demod lock timeout (ms)
 * @zigzag:    non-zero = alternate above/below centre; 0 = linear sweep
 * @steps_max: maximum number of trial offsets
 *
 * Returns 1 on lock, 0 otherwise, -1 on I/O error.
 */
static int stv090x_search_car_loop(struct stv090x_state *state, s32 inc, s32 timeout, int zigzag, s32 steps_max)
{
	int no_signal, lock = 0;
	s32 cpt_step = 0, offst_freq, car_max;
	u32 reg;

	/* max carrier offset: search range +10% margin, capped at Mclk/4 */
	car_max = state->search_range / 1000;
	car_max += (car_max / 10);
	car_max = (65536 * car_max / 2);
	car_max /= (state->mclk / 1000);
	if (car_max > 0x4000)
		car_max = 0x4000;

	if (zigzag)
		offst_freq = 0;			/* start at centre */
	else
		offst_freq = -car_max + inc;	/* start at lower edge */

	do {
		/* stop demod, program trial carrier offset, warm start */
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT1, ((offst_freq / 256) & 0xff)) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT0, offst_freq & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
			goto err;

		reg = STV090x_READ_DEMOD(state, PDELCTRL1);
		STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x1); /* stop DVB-S2 packet delin */
		if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
			goto err;

		if (zigzag) {
			/* mirror around centre, widening by 2*inc each
			 * time we cross from positive to negative */
			if (offst_freq >= 0)
				offst_freq = -offst_freq - 2 * inc;
			else
				offst_freq = -offst_freq;
		} else {
			offst_freq += 2 * inc;
		}

		cpt_step++;

		lock = stv090x_get_dmdlock(state, timeout);
		no_signal = stv090x_chk_signal(state);

	} while ((!lock) &&
		 (!no_signal) &&
		 ((offst_freq - inc) < car_max) &&
		 ((offst_freq + inc) > -car_max) &&
		 (cpt_step < steps_max));

	/* release the DVB-S2 packet delineator reset again */
	reg = STV090x_READ_DEMOD(state, PDELCTRL1);
	STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0);
	if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
		goto err;

	return lock;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Software search algorithm: configure the frequency detector for the
 * requested standard(s), then run the carrier search loop up to two
 * times. A DVB-S2 lock is only accepted if the frame fly-wheel counter
 * stays high (>= 0xd), i.e. the demod keeps decoding valid frames.
 *
 * Returns 1 on (stable) lock, 0 otherwise, -1 on I/O error.
 */
static int stv090x_sw_algo(struct stv090x_state *state)
{
	int no_signal, zigzag, lock = 0;
	u32 reg;

	s32 dvbs2_fly_wheel;
	s32 inc, timeout_step, trials, steps_max;

	/* get params */
	stv090x_get_loop_params(state, &inc, &timeout_step, &steps_max);

	switch (state->search_mode) {
	case STV090x_SEARCH_DVBS1:
	case STV090x_SEARCH_DSS:
		/* accelerate the frequency detector */
		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3B) < 0)
				goto err;
		}
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x49) < 0)
			goto err;
		zigzag = 0;	/* DVB-S1: linear sweep */
		break;

	case STV090x_SEARCH_DVBS2:
		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
				goto err;
		}
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
			goto err;
		zigzag = 1;	/* DVB-S2: zig-zag around centre */
		break;

	case STV090x_SEARCH_AUTO:
	default:
		/* accelerate the frequency detector */
		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3b) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
				goto err;
		}
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0xc9) < 0)
			goto err;
		zigzag = 0;
		break;
	}

	trials = 0;
	do {
		lock = stv090x_search_car_loop(state, inc, timeout_step, zigzag, steps_max);
		no_signal = stv090x_chk_signal(state);
		trials++;

		/*run the SW search 2 times maximum*/
		if (lock || no_signal || (trials == 2)) {
			/*Check if the demod is not losing lock in DVBS2*/
			if (state->dev_ver >= 0x20) {
				/* restore the tracking settings */
				if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0)
					goto err;
			}

			reg = STV090x_READ_DEMOD(state, DMDSTATE);
			if ((lock) && (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == STV090x_DVBS2)) {
				/*Check if the demod is not losing lock in DVBS2*/
				msleep(timeout_step);
				reg = STV090x_READ_DEMOD(state, DMDFLYW);
				dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
				if (dvbs2_fly_wheel < 0xd) {	/*if correct frames is decrementing */
					/* give it one more interval before deciding */
					msleep(timeout_step);
					reg = STV090x_READ_DEMOD(state, DMDFLYW);
					dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
				}
				if (dvbs2_fly_wheel < 0xd) {
					/*FALSE lock, The demod is loosing lock */
					lock = 0;
					if (trials < 2) {
						/* re-arm the DVB-S2 search for the retry */
						if (state->dev_ver >= 0x20) {
							if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
								goto err;
						}
						if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
							goto err;
					}
				}
			}
		}
	} while ((!lock) && (trials < 2) && (!no_signal));

	return lock;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Report the delivery system the demodulator currently decodes, based
 * on the DMDSTATE header mode: 2 = DVB-S2, 3 = legacy (DSS or DVB-S1,
 * distinguished via the FECM DSS/DVB flag), anything else = error.
 */
static enum stv090x_delsys stv090x_get_std(struct stv090x_state *state)
{
	enum stv090x_delsys delsys;
	u32 reg;

	reg = STV090x_READ_DEMOD(state, DMDSTATE);
	switch (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD)) {
	case 2:
		delsys = STV090x_DVBS2;
		break;
	case 3:
		/* legacy mode: FECM tells DSS apart from DVB-S1 */
		reg = STV090x_READ_DEMOD(state, FECM);
		if (STV090x_GETFIELD_Px(reg, DSS_DVB_FIELD) == 1)
			delsys = STV090x_DSS;
		else
			delsys = STV090x_DVBS1;
		break;
	default:
		delsys = STV090x_ERROR;
		break;
	}

	return delsys;
}
/*
 * Read the current carrier frequency offset from the derotator
 * registers and convert it to Hz:
 *     carrier_frequency = MasterClock * Reg / 2^24
 * computed in 12-bit halves to avoid 32-bit overflow.
 */
static s32 stv090x_get_car_freq(struct stv090x_state *state, u32 mclk)
{
	s32 derot, int_1, int_2, tmp_1, tmp_2;

	derot  = STV090x_READ_DEMOD(state, CFR2) << 16;
	derot |= STV090x_READ_DEMOD(state, CFR1) <<  8;
	derot |= STV090x_READ_DEMOD(state, CFR0);

	derot = comp2(derot, 24);	/* sign-extend 24-bit value */
	/* fix: use the mclk parameter (callers pass state->mclk anyway) */
	int_1 = mclk >> 12;
	int_2 = derot >> 12;

	/* carrier_frequency = MasterClock * Reg / 2^24 */
	tmp_1 = mclk % 0x1000;
	tmp_2 = derot % 0x1000;

	/*
	 * Fix: the second cross-term must be int_2 * tmp_1 (derot_high *
	 * mclk_low); the original multiplied int_1 * tmp_1 (mclk_high *
	 * mclk_low), which is not part of the product expansion.
	 */
	derot = (int_1 * int_2) +
		((int_1 * tmp_2) >> 12) +
		((int_2 * tmp_1) >> 12);

	return derot;
}
/*
 * Translate the current Viterbi puncture rate (VITCURPUN) into the
 * driver's FEC code-rate enum and store it in state->fec; unknown
 * puncture values map to STV090x_PRERR. Always returns 0.
 */
static int stv090x_get_viterbi(struct stv090x_state *state)
{
	u32 reg, rate;

	reg = STV090x_READ_DEMOD(state, VITCURPUN);
	rate = STV090x_GETFIELD_Px(reg, VIT_CURPUN_FIELD);

	if (rate == 13)
		state->fec = STV090x_PR12;	/* 1/2 */
	else if (rate == 18)
		state->fec = STV090x_PR23;	/* 2/3 */
	else if (rate == 21)
		state->fec = STV090x_PR34;	/* 3/4 */
	else if (rate == 24)
		state->fec = STV090x_PR56;	/* 5/6 */
	else if (rate == 25)
		state->fec = STV090x_PR67;	/* 6/7 */
	else if (rate == 26)
		state->fec = STV090x_PR78;	/* 7/8 */
	else
		state->fec = STV090x_PRERR;	/* unknown puncture rate */

	return 0;
}
/*
 * After a lock, read back the actual signal parameters (standard,
 * frequency incl. carrier offset, FEC, modcod, pilots, frame length,
 * rolloff, spectral inversion) and judge whether the found carrier
 * lies within the configured search range.
 *
 * Returns STV090x_RANGEOK / STV090x_OUTOFRANGE, or -1 on I/O error
 * (NOTE(review): -1 is returned through the enum type here — looks
 * intentional given the err label pattern, but worth confirming).
 */
static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *state)
{
	struct dvb_frontend *fe = &state->frontend;

	u8 tmg;
	u32 reg;
	s32 i = 0, offst_freq;

	msleep(5);

	if (state->algo == STV090x_BLIND_SEARCH) {
		/* wait (up to ~50ms) for the timing register to settle */
		tmg = STV090x_READ_DEMOD(state, TMGREG2);
		STV090x_WRITE_DEMOD(state, SFRSTEP, 0x5c);
		while ((i <= 50) && (tmg != 0) && (tmg != 0xff)) {
			tmg = STV090x_READ_DEMOD(state, TMGREG2);
			msleep(5);
			i += 5;
		}
	}
	state->delsys = stv090x_get_std(state);

	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (state->config->tuner_get_frequency) {
		if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
			goto err;
	}

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	/* add the measured carrier offset (kHz) to the tuner frequency */
	offst_freq = stv090x_get_car_freq(state, state->mclk) / 1000;
	state->frequency += offst_freq;

	if (stv090x_get_viterbi(state) < 0)
		goto err;

	reg = STV090x_READ_DEMOD(state, DMDMODCOD);
	state->modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
	state->pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
	state->frame_len = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) >> 1;
	reg = STV090x_READ_DEMOD(state, TMGOBS);
	state->rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
	reg = STV090x_READ_DEMOD(state, FECM);
	state->inversion = STV090x_GETFIELD_Px(reg, IQINV_FIELD);

	if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000)) {
		/* blind/low-SR: re-read the tuner frequency, and also
		 * accept offsets within half the carrier width */
		if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
			goto err;

		if (state->config->tuner_get_frequency) {
			if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
				goto err;
		}

		if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
			goto err;

		if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
			return STV090x_RANGEOK;
		else if (abs(offst_freq) <= (stv090x_car_width(state->srate, state->rolloff) / 2000))
			return STV090x_RANGEOK;
		else
			return STV090x_OUTOFRANGE; /* Out of Range */
	} else {
		if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
			return STV090x_RANGEOK;
		else
			return STV090x_OUTOFRANGE;
	}

	return STV090x_OUTOFRANGE;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Read the 24-bit timing offset registers and convert them into a
 * symbol rate correction:
 *     offset = srate * timing_reg / 2^24 / 32
 * (the *10 / 320 pair implements the final /32 with extra precision).
 * A zero register value is clamped to 1 to avoid dividing by zero.
 */
static u32 stv090x_get_tmgoffst(struct stv090x_state *state, u32 srate)
{
	s32 offst_tmg;

	offst_tmg  = STV090x_READ_DEMOD(state, TMGREG2) << 16;
	offst_tmg |= STV090x_READ_DEMOD(state, TMGREG1) <<  8;
	offst_tmg |= STV090x_READ_DEMOD(state, TMGREG0);

	offst_tmg = comp2(offst_tmg, 24); /* 2's complement */
	if (!offst_tmg)
		offst_tmg = 1;

	offst_tmg  = ((s32) srate * 10) / ((s32) 0x1000000 / offst_tmg);
	offst_tmg /= 320;

	return offst_tmg;
}
/*
 * Pick the optimal carrier loop coefficient (ACLC register value) for a
 * long-frame DVB-S2 signal, from the cut-specific lookup tables, keyed
 * by modcod, pilot presence and symbol rate band (<=3, 7, 15, 25, >25
 * Msps). Three tables exist: low-rate QPSK, the main table, and a
 * 16/32APSK table. Returns the ACLC byte (default 0x29 if no table row
 * is hit, which cannot happen after the index clamping below).
 */
static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_modcod modcod, s32 pilots)
{
	u8 aclc = 0x29;
	s32 i;
	struct stv090x_long_frame_crloop *car_loop, *car_loop_qpsk_low, *car_loop_apsk_low;

	if (state->dev_ver == 0x20) {
		car_loop		= stv090x_s2_crl_cut20;
		car_loop_qpsk_low	= stv090x_s2_lowqpsk_crl_cut20;
		car_loop_apsk_low	= stv090x_s2_apsk_crl_cut20;
	} else {
		/* >= Cut 3 */
		car_loop		= stv090x_s2_crl_cut30;
		car_loop_qpsk_low	= stv090x_s2_lowqpsk_crl_cut30;
		car_loop_apsk_low	= stv090x_s2_apsk_crl_cut30;
	}

	if (modcod < STV090x_QPSK_12) {
		/* low-rate QPSK table (3 rows), clamp to last row */
		i = 0;
		while ((i < 3) && (modcod != car_loop_qpsk_low[i].modcod))
			i++;
		if (i >= 3)
			i = 2;
	} else {
		/* main table (14 rows); if modcod not found there, fall
		 * back to the APSK table (11 rows), clamp to last row */
		i = 0;
		while ((i < 14) && (modcod != car_loop[i].modcod))
			i++;
		if (i >= 14) {
			i = 0;
			while ((i < 11) && (modcod != car_loop_apsk_low[i].modcod))
				i++;
			if (i >= 11)
				i = 10;
		}
	}

	if (modcod <= STV090x_QPSK_25) {
		/* low-rate QPSK: per-pilot, per-srate-band coefficient */
		if (pilots) {
			if (state->srate <= 3000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_2;
			else if (state->srate <= 7000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_5;
			else if (state->srate <= 15000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_10;
			else if (state->srate <= 25000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_20;
			else
				aclc = car_loop_qpsk_low[i].crl_pilots_on_30;
		} else {
			if (state->srate <= 3000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_2;
			else if (state->srate <= 7000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_5;
			else if (state->srate <= 15000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_10;
			else if (state->srate <= 25000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_20;
			else
				aclc = car_loop_qpsk_low[i].crl_pilots_off_30;
		}

	} else if (modcod <= STV090x_8PSK_910) {
		/* QPSK >= 1/2 and 8PSK: main table */
		if (pilots) {
			if (state->srate <= 3000000)
				aclc = car_loop[i].crl_pilots_on_2;
			else if (state->srate <= 7000000)
				aclc = car_loop[i].crl_pilots_on_5;
			else if (state->srate <= 15000000)
				aclc = car_loop[i].crl_pilots_on_10;
			else if (state->srate <= 25000000)
				aclc = car_loop[i].crl_pilots_on_20;
			else
				aclc = car_loop[i].crl_pilots_on_30;
		} else {
			if (state->srate <= 3000000)
				aclc = car_loop[i].crl_pilots_off_2;
			else if (state->srate <= 7000000)
				aclc = car_loop[i].crl_pilots_off_5;
			else if (state->srate <= 15000000)
				aclc = car_loop[i].crl_pilots_off_10;
			else if (state->srate <= 25000000)
				aclc = car_loop[i].crl_pilots_off_20;
			else
				aclc = car_loop[i].crl_pilots_off_30;
		}
	} else { /* 16APSK and 32APSK */
		/* APSK modes always use pilots, hence only the _on_ fields */
		if (state->srate <= 3000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_2;
		else if (state->srate <= 7000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_5;
		else if (state->srate <= 15000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_10;
		else if (state->srate <= 25000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_20;
		else
			aclc = car_loop_apsk_low[i].crl_pilots_on_30;
	}

	return aclc;
}
/*
 * Pick the carrier loop coefficient (ACLC register value) for a
 * short-frame DVB-S2 signal, from the cut-specific short-frame table,
 * keyed by modulation and symbol rate band (<=3, 7, 15, 25, >25 Msps).
 */
static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
{
	struct stv090x_short_frame_crloop *crl_table = NULL;
	s32 mod_index = 0;
	u8 aclc = 0x0b;

	/* row index in the short-frame table, by modulation */
	switch (state->modulation) {
	case STV090x_8PSK:
		mod_index = 1;
		break;
	case STV090x_16APSK:
		mod_index = 2;
		break;
	case STV090x_32APSK:
		mod_index = 3;
		break;
	case STV090x_QPSK:
	default:
		mod_index = 0;
		break;
	}

	if (state->dev_ver >= 0x30) {
		/* Cut 3.0 and up */
		crl_table = stv090x_s2_short_crl_cut30;
	} else {
		/* Cut 2.0 and up: we don't support cuts older than 2.0 */
		crl_table = stv090x_s2_short_crl_cut20;
	}

	/* column by symbol rate band */
	if (state->srate <= 3000000)
		aclc = crl_table[mod_index].crl_2;
	else if (state->srate <= 7000000)
		aclc = crl_table[mod_index].crl_5;
	else if (state->srate <= 15000000)
		aclc = crl_table[mod_index].crl_10;
	else if (state->srate <= 25000000)
		aclc = crl_table[mod_index].crl_20;
	else
		aclc = crl_table[mod_index].crl_30;

	return aclc;
}
/*
 * After a successful acquisition, switch the demodulator from search to
 * tracking: lock the standard (S1/DSS vs S2), program the standard- and
 * modcod-specific carrier loop coefficients, refine the symbol rate
 * (blind search), pin the found carrier offset and, when needed,
 * narrow the tuner bandwidth and re-acquire.
 *
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_optimize_track(struct stv090x_state *state)
{
	struct dvb_frontend *fe = &state->frontend;

	enum stv090x_rolloff rolloff;
	enum stv090x_modcod modcod;

	s32 srate, pilots, aclc, f_1, f_0, i = 0, blind_tune = 0;
	u32 reg;

	/* measured symbol rate plus residual timing offset correction */
	srate  = stv090x_get_srate(state, state->mclk);
	srate += stv090x_get_tmgoffst(state, srate);

	switch (state->delsys) {
	case STV090x_DVBS1:
	case STV090x_DSS:
		if (state->algo == STV090x_SEARCH_AUTO) {
			/* restrict the demod to DVB-S1 only */
			reg = STV090x_READ_DEMOD(state, DMDCFGMD);
			STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
			STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
			if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
				goto err;
		}
		/* force the configured rolloff for DVB-S1 */
		reg = STV090x_READ_DEMOD(state, DEMOD);
		STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
		STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x01);
		if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
			goto err;

		if (state->dev_ver >= 0x30) {
			/* cut 3.0+: rate-1/2 gets different noise shaping */
			if (stv090x_get_viterbi(state) < 0)
				goto err;

			if (state->fec == STV090x_PR12) {
				if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x98) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
					goto err;
			} else {
				if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x18) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
					goto err;
			}
		}

		if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
			goto err;
		break;

	case STV090x_DVBS2:
		/* restrict the demod to DVB-S2 only */
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, ACLC, 0) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, BCLC, 0) < 0)
			goto err;
		if (state->frame_len == STV090x_LONG_FRAME) {
			/* long frame: coefficient depends on modcod+pilots */
			reg = STV090x_READ_DEMOD(state, DMDMODCOD);
			modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
			pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
			aclc = stv090x_optimize_carloop(state, modcod, pilots);
			if (modcod <= STV090x_QPSK_910) {
				STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc);
			} else if (modcod <= STV090x_8PSK_910) {
				if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
					goto err;
			}
			if ((state->demod_mode == STV090x_SINGLE) && (modcod > STV090x_8PSK_910)) {
				/* 16/32APSK only supported in single mode */
				if (modcod <= STV090x_16APSK_910) {
					if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
						goto err;
					if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
						goto err;
				} else {
					if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
						goto err;
					if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
						goto err;
				}
			}
		} else {
			/*Carrier loop setting for short frame*/
			aclc = stv090x_optimize_carloop_short(state);
			if (state->modulation == STV090x_QPSK) {
				if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc) < 0)
					goto err;
			} else if (state->modulation == STV090x_8PSK) {
				if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
					goto err;
			} else if (state->modulation == STV090x_16APSK) {
				if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
					goto err;
			} else if (state->modulation == STV090x_32APSK) {
				if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
					goto err;
			}
		}

		STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67); /* PER */
		break;

	case STV090x_UNKNOWN:
	default:
		/* keep both demodulators enabled */
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		break;
	}

	/* remember the found carrier offset for re-acquisition below */
	f_1 = STV090x_READ_DEMOD(state, CFR2);
	f_0 = STV090x_READ_DEMOD(state, CFR1);

	reg = STV090x_READ_DEMOD(state, TMGOBS);
	rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);

	if (state->algo == STV090x_BLIND_SEARCH) {
		/* blind search: stop scanning and pin the found srate */
		STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00);
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0x00);
		STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0)
			goto err;

		if (stv090x_set_srate(state, srate) < 0)
			goto err;
		blind_tune = 1;
	}

	if (state->dev_ver >= 0x20) {
		if ((state->search_mode == STV090x_SEARCH_DVBS1)	||
		    (state->search_mode == STV090x_SEARCH_DSS)		||
		    (state->search_mode == STV090x_SEARCH_AUTO)) {
			/* Viterbi tracking thresholds */
			if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x0a) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x00) < 0)
				goto err;
		}
	}

	if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
		goto err;

	/* AUTO tracking MODE */
	if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x80) < 0)
		goto err;
	/* AUTO tracking MODE */
	if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x80) < 0)
		goto err;

	if ((state->dev_ver >= 0x20) || (blind_tune == 1) || (state->srate < 10000000)) {
		/* update initial carrier freq with the found freq offset */
		if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
			goto err;
		state->tuner_bw = stv090x_car_width(srate, state->rolloff) + 10000000;

		if ((state->dev_ver >= 0x20) || (blind_tune == 1)) {
			if (state->algo != STV090x_WARM_SEARCH) {
				/* narrow the tuner bandwidth for tracking */
				if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
					goto err;

				if (state->config->tuner_set_bandwidth) {
					if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
						goto err;
				}

				if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
					goto err;
			}
		}
		if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000))
			msleep(50); /* blind search: wait 50ms for SR stabilization */
		else
			msleep(5);

		stv090x_get_lock_tmg(state);

		if (!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) {
			/* lost lock: re-trigger acquisition at the found
			 * carrier offset, up to 3 retries */
			if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
				goto err;

			i = 0;

			while ((!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) && (i <= 2)) {

				if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
					goto err;
				i++;
			}
		}

	}

	if (state->dev_ver >= 0x20) {
		if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
			goto err;
	}

	if ((state->delsys == STV090x_DVBS1) || (state->delsys == STV090x_DSS))
		stv090x_set_vit_thtracq(state);

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Poll for FEC lock for up to @timeout ms (10ms granularity). The lock
 * source depends on the detected standard: the packet delineator for
 * DVB-S2 (header mode 2), the Viterbi decoder for DVB-S1/legacy
 * (header mode 3); any other mode counts as unlocked.
 * Returns 1 on lock, 0 on timeout.
 */
static int stv090x_get_feclock(struct stv090x_state *state, s32 timeout)
{
	s32 elapsed = 0, lock = 0, hdr_mode;
	u32 reg;

	while ((elapsed < timeout) && (!lock)) {
		reg = STV090x_READ_DEMOD(state, DMDSTATE);
		hdr_mode = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);

		if (hdr_mode == 2) {
			/* DVB-S2: packet delineator lock */
			reg = STV090x_READ_DEMOD(state, PDELSTATUS1);
			lock = STV090x_GETFIELD_Px(reg, PKTDELIN_LOCK_FIELD);
		} else if (hdr_mode == 3) {
			/* DVB-S1/legacy: Viterbi lock */
			reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
			lock = STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD);
		} else {
			/* still searching / first PLH only */
			lock = 0;
		}

		if (!lock) {
			msleep(10);
			elapsed += 10;
		}
	}
	return lock;
}
/*
 * Wait for a full lock chain: demodulator lock, then FEC lock, then a
 * valid transport stream (TS FIFO line-OK, polled at 1ms for up to
 * timeout_fec ms). Returns 1 when all three are achieved, 0 otherwise.
 */
static int stv090x_get_lock(struct stv090x_state *state, s32 timeout_dmd, s32 timeout_fec)
{
	u32 reg;
	s32 elapsed;
	int lock = 0;

	if (!stv090x_get_dmdlock(state, timeout_dmd))
		return 0;
	if (!stv090x_get_feclock(state, timeout_fec))
		return 0;

	/* demod + FEC locked: wait for the TS FIFO to report line OK */
	for (elapsed = 0; (elapsed < timeout_fec) && (!lock); elapsed++) {
		reg = STV090x_READ_DEMOD(state, TSSTATUS);
		lock = STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD);
		msleep(1);
	}

	return lock;
}
/*
 * Switch rolloff handling to automatic for DVB-S2: clear the manual
 * rolloff override bit — MANUAL_SXROLLOFF on cut <= 2.0 silicon,
 * MANUAL_S2ROLLOFF on newer cuts. Returns 0 on success, -1 on I/O
 * error.
 */
static int stv090x_set_s2rolloff(struct stv090x_state *state)
{
	u32 reg;

	reg = STV090x_READ_DEMOD(state, DEMOD);
	if (state->dev_ver <= 0x20) {
		/* rolloff to auto mode if DVBS2 */
		STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x00);
	} else {
		/* DVB-S2 rolloff to auto mode if DVBS2 */
		STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 0x00);
	}
	if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * Top-level acquisition algorithm: stop the stream merger and demod,
 * configure the symbol rate scan (wide for blind search, narrow
 * otherwise), tune the tuner, verify input power, then run the
 * blind / cold / warm search and finally validate the found signal and
 * switch to tracking.
 *
 * Returns the resulting signal state (STV090x_RANGEOK on success),
 * or -1 on I/O error.
 */
static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
{
	struct dvb_frontend *fe = &state->frontend;
	enum stv090x_signal_state signal_state = STV090x_NOCARRIER;
	u32 reg;
	s32 timeout_dmd = 500, timeout_fec = 50, agc1_power, power_iq = 0, i;
	int lock = 0, low_sr = 0, no_signal = 0;

	reg = STV090x_READ_DEMOD(state, TSCFGH);
	STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* Stop path 1 stream merger */
	if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
		goto err;

	if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod stop */
		goto err;

	if (state->dev_ver >= 0x20) {
		if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0) /* cut 2.0 */
			goto err;
	}

	stv090x_get_lock_tmg(state);

	if (state->algo == STV090x_BLIND_SEARCH) {
		state->tuner_bw = 2 * 36000000; /* wide bw for unknown srate */
		if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc0) < 0) /* wider srate scan */
			goto err;
		if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
			goto err;
		if (stv090x_set_srate(state, 1000000) < 0) /* inital srate = 1Msps */
			goto err;
	} else {
		/* known srate */
		if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
			goto err;

		if (state->srate < 2000000) {
			/* SR < 2MSPS */
			if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x63) < 0)
				goto err;
		} else {
			/* SR >= 2Msps */
			if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
				goto err;
		}

		if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
			goto err;

		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, KREFTMG, 0x5a) < 0)
				goto err;
			/* cold search gets a 1.5x wider tuner bandwidth */
			if (state->algo == STV090x_COLD_SEARCH)
				state->tuner_bw = (15 * (stv090x_car_width(state->srate, state->rolloff) + 10000000)) / 10;
			else if (state->algo == STV090x_WARM_SEARCH)
				state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + 10000000;
		}

		/* if cold start or warm  (Symbolrate is known)
		 * use a Narrow symbol rate scan range
		 */
		if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0) /* narrow srate scan */
			goto err;

		if (stv090x_set_srate(state, state->srate) < 0)
			goto err;

		if (stv090x_set_max_srate(state, state->mclk, state->srate) < 0)
			goto err;
		if (stv090x_set_min_srate(state, state->mclk, state->srate) < 0)
			goto err;

		if (state->srate >= 10000000)
			low_sr = 0;
		else
			low_sr = 1;
	}

	/* Setup tuner */
	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (state->config->tuner_set_bbgain) {
		if (state->config->tuner_set_bbgain(fe, 10) < 0) /* 10dB */
			goto err;
	}

	if (state->config->tuner_set_frequency) {
		if (state->config->tuner_set_frequency(fe, state->frequency) < 0)
			goto err;
	}

	if (state->config->tuner_set_bandwidth) {
		if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
			goto err;
	}

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	msleep(50);

	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (state->config->tuner_get_status) {
		/* fix: "&reg" was garbled to a mojibake char */
		if (state->config->tuner_get_status(fe, &reg) < 0)
			goto err;
	}

	if (reg)
		dprintk(FE_DEBUG, 1, "Tuner phase locked");
	else
		dprintk(FE_DEBUG, 1, "Tuner unlocked");

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	msleep(10);

	agc1_power = MAKEWORD16(STV090x_READ_DEMOD(state, AGCIQIN1),
				STV090x_READ_DEMOD(state, AGCIQIN0));

	if (agc1_power == 0) {
		/* If AGC1 integrator value is 0
		 * then read POWERI, POWERQ
		 */
		for (i = 0; i < 5; i++) {
			power_iq += (STV090x_READ_DEMOD(state, POWERI) +
				     STV090x_READ_DEMOD(state, POWERQ)) >> 1;
		}
		power_iq /= 5;
	}

	if ((agc1_power == 0) && (power_iq < STV090x_IQPOWER_THRESHOLD)) {
		dprintk(FE_ERROR, 1, "No Signal: POWER_IQ=0x%02x", power_iq);
		lock = 0;
	} else {
		/* signal present: set inversion and manual rolloff,
		 * then start the standard search */
		reg = STV090x_READ_DEMOD(state, DEMOD);
		STV090x_SETFIELD_Px(reg, SPECINV_CONTROL_FIELD, state->inversion);

		if (state->dev_ver <= 0x20) {
			/* rolloff to auto mode if DVBS2 */
			STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 1);
		} else {
			/* DVB-S2 rolloff to auto mode if DVBS2 */
			STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 1);
		}
		if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
			goto err;

		if (stv090x_delivery_search(state) < 0)
			goto err;

		if (state->algo != STV090x_BLIND_SEARCH) {
			if (stv090x_start_search(state) < 0)
				goto err;
		}
	}

	/* need to check for AGC1 state */

	if (state->algo == STV090x_BLIND_SEARCH)
		lock = stv090x_blind_search(state);

	else if (state->algo == STV090x_COLD_SEARCH)
		lock = stv090x_get_coldlock(state, timeout_dmd);

	else if (state->algo == STV090x_WARM_SEARCH)
		lock = stv090x_get_dmdlock(state, timeout_dmd);

	if ((!lock) && (state->algo == STV090x_COLD_SEARCH)) {
		/* cold search fallback: software zig-zag (high SR only) */
		if (!low_sr) {
			if (stv090x_chk_tmg(state))
				lock = stv090x_sw_algo(state);
		}
	}

	if (lock)
		signal_state = stv090x_get_sig_params(state);

	if ((lock) && (signal_state == STV090x_RANGEOK)) { /* signal within Range */
		stv090x_optimize_track(state);

		if (state->dev_ver >= 0x20) {
			/* >= Cut 2.0 :release TS reset after
			 * demod lock and optimized Tracking
			 */
			reg = STV090x_READ_DEMOD(state, TSCFGH);
			STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
			if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
				goto err;

			msleep(3);

			STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* merger reset */
			if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
				goto err;

			STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
			if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
				goto err;
		}

		if (stv090x_get_lock(state, timeout_fec, timeout_fec)) {
			lock = 1;
			if (state->delsys == STV090x_DVBS2) {
				stv090x_set_s2rolloff(state);

				/* Reset DVBS2 packet delinator error counter */
				reg = STV090x_READ_DEMOD(state, PDELCTRL2);
				STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 1);
				if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
					goto err;
				/* Reset DVBS2 packet delinator error counter */
				reg = STV090x_READ_DEMOD(state, PDELCTRL2);
				STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 0);
				if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
					goto err;

				if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67) < 0) /* PER */
					goto err;
			} else {
				if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
					goto err;
			}
			/* Reset the Total packet counter */
			if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0x00) < 0)
				goto err;
			/* Reset the packet Error counter2 */
			if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
				goto err;
		} else {
			lock = 0;
			signal_state = STV090x_NODATA;
			no_signal = stv090x_chk_signal(state);
		}
	}
	return signal_state;

err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_search - DVB core "custom search" callback
 *
 * Latches the requested delivery system, frequency and symbol rate into
 * the driver state, then runs the demodulator acquisition algorithm
 * (cold search, automatic standard/FEC detection, +/- 2 MHz range).
 *
 * Returns DVBFE_ALGO_SEARCH_SUCCESS when the signal was acquired with
 * parameters in range, DVBFE_ALGO_SEARCH_FAILED otherwise.  The former
 * trailing DVBFE_ALGO_SEARCH_ERROR return was unreachable (both
 * branches returned) and has been removed.
 */
static enum dvbfe_search stv090x_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
{
	struct stv090x_state *state = fe->demodulator_priv;
	struct dtv_frontend_properties *props = &fe->dtv_property_cache;

	state->delsys = props->delivery_system;
	state->frequency = p->frequency;
	state->srate = p->u.qpsk.symbol_rate;
	state->search_mode = STV090x_SEARCH_AUTO;
	state->algo = STV090x_COLD_SEARCH;
	state->fec = STV090x_PRERR;
	state->search_range = 2000000;	/* +/- 2 MHz around the target */

	if (stv090x_algo(state) == STV090x_RANGEOK) {
		dprintk(FE_DEBUG, 1, "Search success!");
		return DVBFE_ALGO_SEARCH_SUCCESS;
	}

	dprintk(FE_DEBUG, 1, "Search failed!");
	return DVBFE_ALGO_SEARCH_FAILED;
}
/*
 * stv090x_read_status - report demodulator/FEC/TS lock state
 *
 * *status is cleared up front so that an unlocked or partially locked
 * demodulator always reports "no lock"; previously the TS-not-OK paths
 * in the DVB-S/S2 cases returned without writing *status at all,
 * leaving the caller's stale value in place.  The full lock mask is
 * set only when carrier, Viterbi (DVB-S only) and TS FIFO are all OK.
 */
static int stv090x_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg;
	u8 search_state;

	*status = 0;

	reg = STV090x_READ_DEMOD(state, DMDSTATE);
	search_state = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
	switch (search_state) {
	case 0: /* searching */
	case 1: /* first PLH detected */
	default:
		dprintk(FE_DEBUG, 1, "Status: Unlocked (Searching ..)");
		break;

	case 2: /* DVB-S2 mode */
		dprintk(FE_DEBUG, 1, "Delivery system: DVB-S2");
		reg = STV090x_READ_DEMOD(state, DSTATUS);
		if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
			reg = STV090x_READ_DEMOD(state, TSSTATUS);
			if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD))
				*status = FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
		}
		break;

	case 3: /* DVB-S1/legacy mode */
		dprintk(FE_DEBUG, 1, "Delivery system: DVB-S");
		reg = STV090x_READ_DEMOD(state, DSTATUS);
		if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
			reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
			if (STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD)) {
				reg = STV090x_READ_DEMOD(state, TSSTATUS);
				if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD))
					*status = FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
			}
		}
		break;
	}
	return 0;
}
/*
 * stv090x_read_per - read the packet error count (exposed via read_ber)
 *
 * With no lock, *per is forced to the maximum (1 << 23).  Otherwise the
 * 24-bit error counter 2 (ERRCNT2x) is assembled into *per and the
 * total frame counter (FBERCPTx) is sampled; both counters are then
 * reset for the next measurement window.
 */
static int stv090x_read_per(struct dvb_frontend *fe, u32 *per)
{
	struct stv090x_state *state = fe->demodulator_priv;

	s32 count_4, count_3, count_2, count_1, count_0, count;
	u32 reg, h, m, l;
	enum fe_status status;

	stv090x_read_status(fe, &status);
	if (!(status & FE_HAS_LOCK)) {
		*per = 1 << 23; /* Max PER */
	} else {
		/* Counter 2: assemble the 24-bit error count, high byte first. */
		reg = STV090x_READ_DEMOD(state, ERRCNT22);
		h = STV090x_GETFIELD_Px(reg, ERR_CNT2_FIELD);
		reg = STV090x_READ_DEMOD(state, ERRCNT21);
		m = STV090x_GETFIELD_Px(reg, ERR_CNT21_FIELD);
		reg = STV090x_READ_DEMOD(state, ERRCNT20);
		l = STV090x_GETFIELD_Px(reg, ERR_CNT20_FIELD);

		*per = ((h << 16) | (m << 8) | l);

		/* Frame counter: only the lower three bytes are a valid
		 * count; if either top byte is non-zero the counter is
		 * treated as saturated (1 << 24). */
		count_4 = STV090x_READ_DEMOD(state, FBERCPT4);
		count_3 = STV090x_READ_DEMOD(state, FBERCPT3);
		count_2 = STV090x_READ_DEMOD(state, FBERCPT2);
		count_1 = STV090x_READ_DEMOD(state, FBERCPT1);
		count_0 = STV090x_READ_DEMOD(state, FBERCPT0);

		if ((!count_4) && (!count_3)) {
			count = (count_2 & 0xff) << 16;
			count |= (count_1 & 0xff) << 8;
			count |= count_0 & 0xff;
		} else {
			count = 1 << 24;
		}
		/* No frames counted yet: report a minimal non-zero PER. */
		if (count == 0)
			*per = 1;
	}
	/* Reset the total packet counter for the next window. */
	if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0) < 0)
		goto err;
	/* Re-arm packet error counter 2 (0xc1 matches the lock-time setup). */
	if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_table_lookup - piecewise-linear interpolation in a lookup table
 * @tab: table of (read, real) pairs; the search assumes ascending 'read'
 * @max: index of the last table entry
 * @val: raw register reading to translate
 *
 * Readings outside the table are clamped to the first/last 'real'
 * entry.  Inside the table a binary search narrows [min, max] to two
 * adjacent entries and the result is interpolated between them.
 *
 * Fix: guard the interpolation against division by zero when two
 * adjacent entries carry the same 'read' value.
 */
static int stv090x_table_lookup(const struct stv090x_tab *tab, int max, int val)
{
	int res = 0;
	int min = 0, med;

	if (val < tab[min].read)
		res = tab[min].real;
	else if (val >= tab[max].read)
		res = tab[max].real;
	else {
		while ((max - min) > 1) {
			med = (max + min) / 2;
			if (val >= tab[min].read && val < tab[med].read)
				max = med;
			else
				min = med;
		}
		if (tab[max].read != tab[min].read)
			res = ((val - tab[min].read) *
			       (tab[max].real - tab[min].real) /
			       (tab[max].read - tab[min].read)) +
			      tab[min].real;
		else
			res = tab[min].real; /* degenerate span: no slope */
	}
	return res;
}
/*
 * stv090x_read_signal_strength - RF level derived from the AGC value
 *
 * The AGC integrator reading is translated through stv090x_rf_tab and
 * then clamped when the reading falls outside the table range.
 *
 * NOTE(review): *strength is a u16, so the "-100" clamp below wraps to
 * 65436 -- confirm the intended scale.  The clamp conditions also
 * depend on stv090x_rf_tab's ordering, which is not visible here.
 */
static int stv090x_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg;
	s32 agc;

	reg = STV090x_READ_DEMOD(state, AGCIQIN1);
	agc = STV090x_GETFIELD_Px(reg, AGCIQ_VALUE_FIELD);

	*strength = stv090x_table_lookup(stv090x_rf_tab, ARRAY_SIZE(stv090x_rf_tab) - 1, agc);
	/* Out-of-table readings override the interpolated value. */
	if (agc > stv090x_rf_tab[0].read)
		*strength = 5;
	else if (agc < stv090x_rf_tab[ARRAY_SIZE(stv090x_rf_tab) - 1].read)
		*strength = -100;
	return 0;
}
/*
 * stv090x_read_cnr - estimate C/N from the noise indicator registers
 *
 * When the demodulator reports lock, sixteen samples of the standard
 * specific noise registers are averaged (1 ms apart) and translated
 * through the matching C/N table; readings below the last table entry
 * are clamped to 1000.
 *
 * Fixes vs. the previous revision: the low byte of each sample was
 * extracted from reg_1 instead of reg_0 (discarding half the reading),
 * and the DVB-S clamp indexed stv090x_s2cn_tab with the size of
 * stv090x_s1cn_tab.
 */
static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg_0, reg_1, reg, i;
	s32 val_0, val_1, val = 0;
	u8 lock_f;

	switch (state->delsys) {
	case STV090x_DVBS2:
		reg = STV090x_READ_DEMOD(state, DSTATUS);
		lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
		if (lock_f) {
			msleep(5);
			for (i = 0; i < 16; i++) {
				reg_1 = STV090x_READ_DEMOD(state, NNOSPLHT1);
				val_1 = STV090x_GETFIELD_Px(reg_1, NOSPLHT_NORMED_FIELD);
				reg_0 = STV090x_READ_DEMOD(state, NNOSPLHT0);
				val_0 = STV090x_GETFIELD_Px(reg_0, NOSPLHT_NORMED_FIELD);
				val += MAKEWORD16(val_1, val_0);
				msleep(1);
			}
			val /= 16;
			*cnr = stv090x_table_lookup(stv090x_s2cn_tab, ARRAY_SIZE(stv090x_s2cn_tab) - 1, val);
			if (val < stv090x_s2cn_tab[ARRAY_SIZE(stv090x_s2cn_tab) - 1].read)
				*cnr = 1000;
		}
		break;

	case STV090x_DVBS1:
	case STV090x_DSS:
		reg = STV090x_READ_DEMOD(state, DSTATUS);
		lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
		if (lock_f) {
			msleep(5);
			for (i = 0; i < 16; i++) {
				reg_1 = STV090x_READ_DEMOD(state, NOSDATAT1);
				val_1 = STV090x_GETFIELD_Px(reg_1, NOSDATAT_UNNORMED_FIELD);
				reg_0 = STV090x_READ_DEMOD(state, NOSDATAT0);
				val_0 = STV090x_GETFIELD_Px(reg_0, NOSDATAT_UNNORMED_FIELD);
				val += MAKEWORD16(val_1, val_0);
				msleep(1);
			}
			val /= 16;
			*cnr = stv090x_table_lookup(stv090x_s1cn_tab, ARRAY_SIZE(stv090x_s1cn_tab) - 1, val);
			if (val < stv090x_s1cn_tab[ARRAY_SIZE(stv090x_s1cn_tab) - 1].read)
				*cnr = 1000;
		}
		break;

	default:
		break;
	}
	return 0;
}
/*
 * stv090x_set_tone - switch the 22kHz continuous tone on or off
 *
 * Both paths program DISTX_MODE 0; the difference is the DiSEqC reset
 * bit: SEC_TONE_ON pulses it (assert then release) so the transmitter
 * runs, while SEC_TONE_OFF leaves the block held in reset.
 * NOTE(review): the exact meaning of DISTX_MODE 0 is taken on trust
 * here -- confirm against the STV090x datasheet.
 */
static int stv090x_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg;

	reg = STV090x_READ_DEMOD(state, DISTXCTL);
	switch (tone) {
	case SEC_TONE_ON:
		STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
		STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
			goto err;
		/* Release the reset so the tone is actually emitted. */
		STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
		if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
			goto err;
		break;

	case SEC_TONE_OFF:
		STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
		/* Keep the transmitter in reset: no tone output. */
		STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
			goto err;
		break;
	default:
		return -EINVAL;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_frontend_algo - tuning algorithm selector for the DVB core
 *
 * Always requests DVBFE_ALGO_CUSTOM so that tuning is driven through
 * the driver's own search callback instead of the core's generic
 * software zig-zag.
 */
static enum dvbfe_algo stv090x_frontend_algo(struct dvb_frontend *fe)
{
	return DVBFE_ALGO_CUSTOM;
}
/*
 * stv090x_send_diseqc_msg - transmit a DiSEqC master command
 *
 * Programs DISTX mode 2, pulses the DiSEqC reset, enables precharge,
 * then feeds the command bytes through the TX FIFO.  After disabling
 * precharge it polls (up to ~100 ms) for the transmitter to go idle.
 *
 * NOTE(review): the FIFO-full poll below has no iteration bound; if
 * the hardware never drains the FIFO this loop spins forever --
 * consider a timeout, as used for the idle poll.
 */
static int stv090x_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg, idle = 0, fifo_full = 1;
	int i;

	/* Mode 2 + reset pulse: prepare the DiSEqC transmitter. */
	reg = STV090x_READ_DEMOD(state, DISTXCTL);
	STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 2);
	STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;
	STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;

	/* Hold transmission (precharge) while the FIFO is filled. */
	STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;

	for (i = 0; i < cmd->msg_len; i++) {
		/* Wait for FIFO space before queueing each byte. */
		while (fifo_full) {
			reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
			fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
		}
		if (STV090x_WRITE_DEMOD(state, DISTXDATA, cmd->msg[i]) < 0)
			goto err;
	}

	/* Release precharge: the queued message starts transmitting. */
	reg = STV090x_READ_DEMOD(state, DISTXCTL);
	STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;

	/* Poll for TX idle, at most 10 x 10 ms. */
	i = 0;
	while ((!idle) && (i < 10)) {
		reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
		idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
		msleep(10);
		i++;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_send_diseqc_burst - send a mini-DiSEqC (tone burst) command
 *
 * SEC_MINI_A uses DISTX mode 3 with data 0x00, SEC_MINI_B mode 2 with
 * data 0xFF; the transmit sequence (reset pulse, precharge, FIFO
 * write, precharge release, idle poll) mirrors send_diseqc_msg.
 * NOTE(review): mode/value pairs are taken on trust -- confirm against
 * the STV090x datasheet.  The FIFO-full poll is unbounded, same
 * caveat as in stv090x_send_diseqc_msg.
 */
static int stv090x_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg, idle = 0, fifo_full = 1;
	u8 mode, value;
	int i;

	reg = STV090x_READ_DEMOD(state, DISTXCTL);

	if (burst == SEC_MINI_A) {
		mode = 3;
		value = 0x00;
	} else {
		mode = 2;
		value = 0xFF;
	}

	/* Select burst mode and pulse the DiSEqC reset. */
	STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, mode);
	STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;
	STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;

	/* Hold transmission while the single burst byte is queued. */
	STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;

	while (fifo_full) {
		reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
		fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
	}
	if (STV090x_WRITE_DEMOD(state, DISTXDATA, value) < 0)
		goto err;

	/* Release precharge: transmission starts. */
	reg = STV090x_READ_DEMOD(state, DISTXCTL);
	STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
	if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
		goto err;

	/* Poll for TX idle, at most 10 x 10 ms. */
	i = 0;
	while ((!idle) && (i < 10)) {
		reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
		idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
		msleep(10);
		i++;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_recv_slave_reply - collect a DiSEqC 2.x slave reply
 *
 * Polls the receiver status for up to ~100 ms; once the RX_END flag is
 * seen, the byte count is read from the same status word and the
 * message bytes are drained from the RX FIFO.  Always returns 0; a
 * timeout simply leaves *reply untouched.
 */
static int stv090x_recv_slave_reply(struct dvb_frontend *fe, struct dvb_diseqc_slave_reply *reply)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 status = 0, rx_done = 0, n;

	for (n = 0; n < 10; n++) {
		msleep(10);
		status = STV090x_READ_DEMOD(state, DISRX_ST0);
		rx_done = STV090x_GETFIELD_Px(status, RX_END_FIELD);
		if (rx_done == 1)
			break;
	}

	if (rx_done) {
		reply->msg_len = STV090x_GETFIELD_Px(status, FIFO_BYTENBR_FIELD);
		for (n = 0; n < reply->msg_len; n++)
			reply->msg[n] = STV090x_READ_DEMOD(state, DISRXDATA);
	}

	return 0;
}
/*
 * stv090x_sleep - put the chip into standby
 *
 * Asserts the synthesizer STANDBY bit and then powers down ADC1.
 * Returns 0 on success, -1 after logging on any register I/O failure.
 */
static int stv090x_sleep(struct dvb_frontend *fe)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 val;

	dprintk(FE_DEBUG, 1, "Set %s to sleep",
		state->device == STV0900 ? "STV0900" : "STV0903");

	/* Assert the synthesizer standby bit. */
	val = stv090x_read_reg(state, STV090x_SYNTCTRL);
	STV090x_SETFIELD(val, STANDBY_FIELD, 0x01);
	if (stv090x_write_reg(state, STV090x_SYNTCTRL, val) >= 0) {
		/* Power ADC1 down as well. */
		val = stv090x_read_reg(state, STV090x_TSTTNR1);
		STV090x_SETFIELD(val, ADC1_PON_FIELD, 0);
		if (stv090x_write_reg(state, STV090x_TSTTNR1, val) >= 0)
			return 0;
	}

	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_wakeup - bring the chip out of standby
 *
 * Clears the synthesizer STANDBY bit and re-enables ADC1 -- the exact
 * inverse of stv090x_sleep().  Returns 0 on success, -1 after logging
 * on any register I/O failure.
 */
static int stv090x_wakeup(struct dvb_frontend *fe)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 val;

	dprintk(FE_DEBUG, 1, "Wake %s from standby",
		state->device == STV0900 ? "STV0900" : "STV0903");

	/* Release the synthesizer standby bit. */
	val = stv090x_read_reg(state, STV090x_SYNTCTRL);
	STV090x_SETFIELD(val, STANDBY_FIELD, 0x00);
	if (stv090x_write_reg(state, STV090x_SYNTCTRL, val) >= 0) {
		/* Power ADC1 back up. */
		val = stv090x_read_reg(state, STV090x_TSTTNR1);
		STV090x_SETFIELD(val, ADC1_PON_FIELD, 1);
		if (stv090x_write_reg(state, STV090x_TSTTNR1, val) >= 0)
			return 0;
	}

	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_release - free the per-demodulator state
 *
 * Counterpart of the kzalloc() in stv090x_attach(); called by the DVB
 * core when the frontend is detached.
 */
static void stv090x_release(struct dvb_frontend *fe)
{
	kfree(fe->demodulator_priv);
}
/*
 * stv090x_ldpc_mode - configure the (possibly shared) LDPC decoder
 *
 * Dual mode shares the LDPC between both paths and opens the MODCOD
 * filter tables; single mode dedicates it to the current path and
 * resets the packet delineator.
 *
 * Fix: 'reg' was tested for DDEMOD_FIELD while still zero-initialized
 * -- GENCFG was never read, so the dual-mode configuration was
 * re-applied on every call.  GENCFG is now read before the test.
 */
static int stv090x_ldpc_mode(struct stv090x_state *state, enum stv090x_mode ldpc_mode)
{
	u32 reg = 0;

	/* Current demodulator/LDPC sharing configuration. */
	reg = stv090x_read_reg(state, STV090x_GENCFG);

	switch (ldpc_mode) {
	case STV090x_DUAL:
	default:
		/* Skip reconfiguration if dual mode is already active. */
		if ((state->demod_mode != STV090x_DUAL) || (STV090x_GETFIELD(reg, DDEMOD_FIELD) != 1)) {
			/* set LDPC to dual mode */
			if (stv090x_write_reg(state, STV090x_GENCFG, 0x1d) < 0)
				goto err;
			state->demod_mode = STV090x_DUAL;

			/* Pulse the FEC reset. */
			reg = stv090x_read_reg(state, STV090x_TSTRES0);
			STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
			if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
				goto err;
			STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
			if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
				goto err;

			/* MODCOD filter table for dual mode. */
			if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
				goto err;
		}
		break;

	case STV090x_SINGLE:
		if (stv090x_stop_modcod(state) < 0)
			goto err;
		if (stv090x_activate_modcod_single(state) < 0)
			goto err;

		/* Route the LDPC to the active path. */
		if (state->demod == STV090x_DEMODULATOR_1) {
			if (stv090x_write_reg(state, STV090x_GENCFG, 0x06) < 0) /* path 2 */
				goto err;
		} else {
			if (stv090x_write_reg(state, STV090x_GENCFG, 0x04) < 0) /* path 1 */
				goto err;
		}

		/* Pulse the FEC reset. */
		reg = stv090x_read_reg(state, STV090x_TSTRES0);
		STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
		if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
			goto err;
		STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
		if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
			goto err;

		/* Pulse the packet delineator software reset. */
		reg = STV090x_READ_DEMOD(state, PDELCTRL1);
		STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x01);
		if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
			goto err;
		STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x00);
		if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
			goto err;
		break;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_get_mclk - compute the master clock frequency in Hz
 *
 * mclk = (M_DIV + 1) * xtal / ratio, where the ratio (4 or 6) is
 * selected by SELX1RATIO and config->xtal is given in Hz.
 *
 * NOTE(review): NCOARSE is used as a whole-register value rather than
 * extracting the M_DIV field -- confirm no other bits share the
 * register.  The previous trailing "kHz" comment contradicted the Hz
 * arithmetic and has been corrected.
 */
static u32 stv090x_get_mclk(struct stv090x_state *state)
{
	const struct stv090x_config *config = state->config;
	u32 div, reg;
	u8 ratio;

	div = stv090x_read_reg(state, STV090x_NCOARSE);
	reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
	ratio = STV090x_GETFIELD(reg, SELX1RATIO_FIELD) ? 4 : 6;

	return (div + 1) * config->xtal / ratio; /* Hz */
}
/*
 * stv090x_set_mclk - program the PLL divider for the requested master
 * clock and re-derive the 22kHz DiSEqC tone dividers from it
 *
 * Returns 0 on success, -1 after logging on any register I/O failure.
 * (The 'clk' argument is accepted for interface compatibility but is
 * not used; the crystal frequency comes from the configuration.)
 */
static int stv090x_set_mclk(struct stv090x_state *state, u32 mclk, u32 clk)
{
	const struct stv090x_config *config = state->config;
	u32 val, divider, ratio;

	/* The X1 ratio selects the PLL post-divider: 4 or 6. */
	val = stv090x_read_reg(state, STV090x_SYNTCTRL);
	if (STV090x_GETFIELD(val, SELX1RATIO_FIELD) == 1)
		ratio = 4;
	else
		ratio = 6;

	divider = ((ratio * mclk) / config->xtal) - 1;
	val = stv090x_read_reg(state, STV090x_NCOARSE);
	STV090x_SETFIELD(val, M_DIV_FIELD, divider);
	if (stv090x_write_reg(state, STV090x_NCOARSE, val) < 0)
		goto err;

	/* Cache the clock actually achieved. */
	state->mclk = stv090x_get_mclk(state);

	/* Set the DiSEqC frequency to 22kHz. */
	divider = state->mclk / 704000;
	if (STV090x_WRITE_DEMOD(state, F22TX, divider) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, F22RX, divider) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_set_tspath - configure both transport stream outputs
 *
 * Three stages: (1) the global TS mux register (TSGENERAL on Cut 2.0+,
 * TSGENERAL1X on older silicon) is chosen from the ts1/ts2 mode pair;
 * (2) each path's TSCFGH serial/DVB-CI bits are set from its mode;
 * (3) both TS FIFOs are reset by pulsing RST_HWARE.
 */
static int stv090x_set_tspath(struct stv090x_state *state)
{
	u32 reg;

	if (state->dev_ver >= 0x20) {
		/* Cut 2.0+: TSGENERAL mux selection. */
		switch (state->config->ts1_mode) {
		case STV090x_TSMODE_PARALLEL_PUNCTURED:
		case STV090x_TSMODE_DVBCI:
			switch (state->config->ts2_mode) {
			case STV090x_TSMODE_SERIAL_PUNCTURED:
			case STV090x_TSMODE_SERIAL_CONTINUOUS:
			default:
				/* NOTE(review): return value ignored here,
				 * unlike the sibling writes -- confirm. */
				stv090x_write_reg(state, STV090x_TSGENERAL, 0x00);
				break;

			case STV090x_TSMODE_PARALLEL_PUNCTURED:
			case STV090x_TSMODE_DVBCI:
				/* Both parallel: mux both streams and force
				 * manual FIFO speeds on the two paths. */
				if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x06) < 0) /* Mux'd stream mode */
					goto err;
				reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
				STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
				if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
					goto err;
				reg = stv090x_read_reg(state, STV090x_P2_TSCFGM);
				STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
				if (stv090x_write_reg(state, STV090x_P2_TSCFGM, reg) < 0)
					goto err;
				if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
					goto err;
				if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
					goto err;
				break;
			}
			break;

		case STV090x_TSMODE_SERIAL_PUNCTURED:
		case STV090x_TSMODE_SERIAL_CONTINUOUS:
		default:
			switch (state->config->ts2_mode) {
			case STV090x_TSMODE_SERIAL_PUNCTURED:
			case STV090x_TSMODE_SERIAL_CONTINUOUS:
			default:
				if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
					goto err;
				break;

			case STV090x_TSMODE_PARALLEL_PUNCTURED:
			case STV090x_TSMODE_DVBCI:
				if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0a) < 0)
					goto err;
				break;
			}
			break;
		}
	} else {
		/* Pre-2.0 silicon: TSGENERAL1X mux selection. */
		switch (state->config->ts1_mode) {
		case STV090x_TSMODE_PARALLEL_PUNCTURED:
		case STV090x_TSMODE_DVBCI:
			switch (state->config->ts2_mode) {
			case STV090x_TSMODE_SERIAL_PUNCTURED:
			case STV090x_TSMODE_SERIAL_CONTINUOUS:
			default:
				stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x10);
				break;

			case STV090x_TSMODE_PARALLEL_PUNCTURED:
			case STV090x_TSMODE_DVBCI:
				stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x16);
				/* NOTE(review): P1 MANSPEED is written 3 and
				 * then 0 (a pulse?); the >= 0x20 path writes
				 * P1 and P2 instead -- confirm intent. */
				reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
				STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
				if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
					goto err;
				reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
				STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 0);
				if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
					goto err;
				if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
					goto err;
				if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
					goto err;
				break;
			}
			break;

		case STV090x_TSMODE_SERIAL_PUNCTURED:
		case STV090x_TSMODE_SERIAL_CONTINUOUS:
		default:
			switch (state->config->ts2_mode) {
			case STV090x_TSMODE_SERIAL_PUNCTURED:
			case STV090x_TSMODE_SERIAL_CONTINUOUS:
			default:
				stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x14);
				break;

			case STV090x_TSMODE_PARALLEL_PUNCTURED:
			case STV090x_TSMODE_DVBCI:
				stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x12);
				break;
			}
			break;
		}
	}

	/* Per-path output format: serial/DVB-CI flags for TS1 ... */
	switch (state->config->ts1_mode) {
	case STV090x_TSMODE_PARALLEL_PUNCTURED:
		reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
		if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
			goto err;
		break;

	case STV090x_TSMODE_DVBCI:
		reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
		if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
			goto err;
		break;

	case STV090x_TSMODE_SERIAL_PUNCTURED:
		reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
		if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
			goto err;
		break;

	case STV090x_TSMODE_SERIAL_CONTINUOUS:
		reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
		if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
			goto err;
		break;

	default:
		break;
	}

	/* ... and the same for TS2. */
	switch (state->config->ts2_mode) {
	case STV090x_TSMODE_PARALLEL_PUNCTURED:
		reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
		if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
			goto err;
		break;

	case STV090x_TSMODE_DVBCI:
		reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
		if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
			goto err;
		break;

	case STV090x_TSMODE_SERIAL_PUNCTURED:
		reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
		if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
			goto err;
		break;

	case STV090x_TSMODE_SERIAL_CONTINUOUS:
		reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
		STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
		STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
		if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
			goto err;
		break;

	default:
		break;
	}

	/* Pulse the TS merger reset on both paths (P2 first, then P1). */
	reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
	STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
	if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
		goto err;
	STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
	if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
		goto err;

	reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
	STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
	if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
		goto err;
	STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
	if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_init - per-open frontend initialization
 *
 * Wakes the chip, (re)configures the LDPC sharing mode, applies the
 * cached IQ inversion and roll-off, initializes the tuner through the
 * I2C gate, then programs the transport stream paths.  Returns 0 on
 * success, -1 on any failure.
 */
static int stv090x_init(struct dvb_frontend *fe)
{
	struct stv090x_state *state = fe->demodulator_priv;
	const struct stv090x_config *config = state->config;
	u32 reg;

	if (stv090x_wakeup(fe) < 0) {
		dprintk(FE_ERROR, 1, "Error waking device");
		goto err;
	}

	if (stv090x_ldpc_mode(state, state->demod_mode) < 0)
		goto err;

	/* Apply the cached tuner IQ swap setting. */
	reg = STV090x_READ_DEMOD(state, TNRCFG2);
	STV090x_SETFIELD_Px(reg, TUN_IQSWAP_FIELD, state->inversion);
	if (STV090x_WRITE_DEMOD(state, TNRCFG2, reg) < 0)
		goto err;

	/* Apply the cached roll-off factor. */
	reg = STV090x_READ_DEMOD(state, DEMOD);
	STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
	if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
		goto err;

	/* Tuner access must go through the I2C repeater gate. */
	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (config->tuner_set_mode) {
		if (config->tuner_set_mode(fe, TUNER_WAKE) < 0)
			goto err;
	}

	if (config->tuner_init) {
		if (config->tuner_init(fe) < 0)
			goto err;
	}

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	if (stv090x_set_tspath(state) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/*
 * stv090x_setup - one-time chip bring-up at attach
 *
 * Loads the device specific init tables, programs the PLL/master clock
 * and applies the Cut 2.0 register overrides when the silicon revision
 * supports them.  Returns 0 on success, -1 on failure.
 *
 * Fix: the ">= 0x20 / < 0x20 / > 0x30" chained else-if made the
 * "> 0x30" informational branch unreachable; it is now a separate
 * test after the revision gate.
 */
static int stv090x_setup(struct dvb_frontend *fe)
{
	struct stv090x_state *state = fe->demodulator_priv;
	const struct stv090x_config *config = state->config;
	const struct stv090x_reg *stv090x_initval = NULL;
	const struct stv090x_reg *stv090x_cut20_val = NULL;
	unsigned long t1_size = 0, t2_size = 0;
	u32 reg = 0;
	int i;

	/* Pick the init tables matching the device variant. */
	if (state->device == STV0900) {
		dprintk(FE_DEBUG, 1, "Initializing STV0900");
		stv090x_initval = stv0900_initval;
		t1_size = ARRAY_SIZE(stv0900_initval);
		stv090x_cut20_val = stv0900_cut20_val;
		t2_size = ARRAY_SIZE(stv0900_cut20_val);
	} else if (state->device == STV0903) {
		dprintk(FE_DEBUG, 1, "Initializing STV0903");
		stv090x_initval = stv0903_initval;
		t1_size = ARRAY_SIZE(stv0903_initval);
		stv090x_cut20_val = stv0903_cut20_val;
		t2_size = ARRAY_SIZE(stv0903_cut20_val);
	}

	/* STV090x init */
	if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Stop Demod */
		goto err;
	msleep(5);
	if (STV090x_WRITE_DEMOD(state, TNRCFG, 0x6c) < 0) /* check register ! (No Tuner Mode) */
		goto err;

	/* reg starts at 0: only the repeater level field is set. */
	STV090x_SETFIELD_Px(reg, ENARPT_LEVEL_FIELD, config->repeater_level);
	if (STV090x_WRITE_DEMOD(state, I2CRPT, reg) < 0) /* repeater OFF */
		goto err;

	if (stv090x_write_reg(state, STV090x_NCOARSE, 0x13) < 0) /* set PLL divider */
		goto err;
	msleep(5);
	if (stv090x_write_reg(state, STV090x_I2CCFG, 0x08) < 0) /* 1/41 oversampling */
		goto err;
	if (stv090x_write_reg(state, STV090x_SYNTCTRL, 0x20 | config->clk_mode) < 0) /* enable PLL */
		goto err;
	msleep(5);

	/* write initval */
	dprintk(FE_DEBUG, 1, "Setting up initial values");
	for (i = 0; i < t1_size; i++) {
		if (stv090x_write_reg(state, stv090x_initval[i].addr, stv090x_initval[i].data) < 0)
			goto err;
	}

	state->dev_ver = stv090x_read_reg(state, STV090x_MID);
	if (state->dev_ver >= 0x20) {
		if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
			goto err;

		/* write cut20_val*/
		dprintk(FE_DEBUG, 1, "Setting up Cut 2.0 initial values");
		for (i = 0; i < t2_size; i++) {
			if (stv090x_write_reg(state, stv090x_cut20_val[i].addr, stv090x_cut20_val[i].data) < 0)
				goto err;
		}
	} else {
		dprintk(FE_ERROR, 1, "ERROR: Unsupported Cut: 0x%02x!",
			state->dev_ver);
		goto err;
	}

	if (state->dev_ver > 0x30) {
		/* we shouldn't bail out from here */
		dprintk(FE_ERROR, 1, "INFO: Cut: 0x%02x probably incomplete support!",
			state->dev_ver);
	}

	/* Pulse the global test/reset register. */
	if (stv090x_write_reg(state, STV090x_TSTRES0, 0x80) < 0)
		goto err;
	if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0)
		goto err;

	stv090x_set_mclk(state, 135000000, config->xtal); /* 135 Mhz */
	msleep(5);
	if (stv090x_write_reg(state, STV090x_SYNTCTRL, 0x20 | config->clk_mode) < 0)
		goto err;
	stv090x_get_mclk(state);

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
/* Frontend operations advertised to the DVB core.  Custom-search
 * tuning (get_frontend_algo/search); read_ber is backed by the packet
 * error counter (stv090x_read_per). */
static struct dvb_frontend_ops stv090x_ops = {
	.info = {
		.name			= "STV090x Multistandard",
		.type			= FE_QPSK,
		.frequency_min		= 950000,	/* kHz */
		.frequency_max		= 2150000,	/* kHz */
		.frequency_stepsize	= 0,
		.frequency_tolerance	= 0,
		.symbol_rate_min	= 1000000,
		.symbol_rate_max	= 45000000,
		.caps			= FE_CAN_INVERSION_AUTO |
					  FE_CAN_FEC_AUTO       |
					  FE_CAN_QPSK           |
					  FE_CAN_2G_MODULATION
	},
	.release			= stv090x_release,
	.init				= stv090x_init,
	.sleep				= stv090x_sleep,
	.get_frontend_algo		= stv090x_frontend_algo,
	.i2c_gate_ctrl			= stv090x_i2c_gate_ctrl,
	.diseqc_send_master_cmd		= stv090x_send_diseqc_msg,
	.diseqc_send_burst		= stv090x_send_diseqc_burst,
	.diseqc_recv_slave_reply	= stv090x_recv_slave_reply,
	.set_tone			= stv090x_set_tone,
	.search				= stv090x_search,
	.read_status			= stv090x_read_status,
	.read_ber			= stv090x_read_per,
	.read_signal_strength		= stv090x_read_signal_strength,
	.read_snr			= stv090x_read_cnr
};
/*
 * stv090x_attach - allocate and initialize a frontend instance
 * @config: board specific configuration (I2C address, clocking, tuner hooks)
 * @i2c:    adapter the demodulator sits on
 * @demod:  which of the (up to two) demodulator paths this instance drives
 *
 * Allocates the state, runs the one-time chip setup and leaves the
 * device awake.  Returns the embedded dvb_frontend, or NULL on failure
 * (the state is freed on every error path).
 */
struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
				    struct i2c_adapter *i2c,
				    enum stv090x_demodulator demod)
{
	struct stv090x_state *state = NULL;

	state = kzalloc(sizeof (struct stv090x_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	state->verbose				= &verbose;
	state->config				= config;
	state->i2c				= i2c;
	state->frontend.ops			= stv090x_ops;
	state->frontend.demodulator_priv	= state;
	state->demod				= demod;
	state->demod_mode			= config->demod_mode; /* Single or Dual mode */
	state->device				= config->device;
	state->rolloff				= STV090x_RO_35; /* default */

	/* NOTE(review): the shared lock is (re)initialized only when
	 * demodulator 0 attaches -- this assumes path 0 is always
	 * attached first; confirm for dual-path boards. */
	if (state->demod == STV090x_DEMODULATOR_0)
		mutex_init(&demod_lock);

	if (stv090x_sleep(&state->frontend) < 0) {
		dprintk(FE_ERROR, 1, "Error putting device to sleep");
		goto error;
	}

	if (stv090x_setup(&state->frontend) < 0) {
		dprintk(FE_ERROR, 1, "Error setting up device");
		goto error;
	}

	if (stv090x_wakeup(&state->frontend) < 0) {
		dprintk(FE_ERROR, 1, "Error waking device");
		goto error;
	}

	dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x\n",
		state->device == STV0900 ? "STV0900" : "STV0903",
		demod,
		state->dev_ver);

	return &state->frontend;

error:
	kfree(state);
	return NULL;
}
EXPORT_SYMBOL(stv090x_attach);
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STV090x Multi-Std Broadcast frontend");
MODULE_LICENSE("GPL");
| gpl-2.0 |
vlolteanu/mptcp | drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c | 487 | 3936 | #include <core/os.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <engine/graph.h>
#include "nv20.h"
#include "regs.h"
/*******************************************************************************
* PGRAPH context
******************************************************************************/
/*
 * Build the initial ("golden") PGRAPH context image for an NV2A
 * channel.  The register offsets and values below are hardware state
 * captured from the chip; treat them as opaque -- do not "clean up"
 * or reorder.
 */
static int
nv2a_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv20_graph_chan *chan;
	int ret, i;

	/* 0x36b0 bytes of context, zeroed on allocation. */
	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->chid = nouveau_fifo_chan(parent)->chid;

	/* Word 0 carries the channel id in its top byte. */
	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
	nv_wo32(chan, 0x033c, 0xffff0000);
	nv_wo32(chan, 0x03a0, 0x0fff0000);
	nv_wo32(chan, 0x03a4, 0x0fff0000);
	nv_wo32(chan, 0x047c, 0x00000101);
	nv_wo32(chan, 0x0490, 0x00000111);
	nv_wo32(chan, 0x04a8, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(chan, i, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(chan, i, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(chan, i, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(chan, i, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(chan, i, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(chan, i, 0x07ff0000);
	nv_wo32(chan, 0x05a4, 0x4b7fffff);
	nv_wo32(chan, 0x05fc, 0x00000001);
	nv_wo32(chan, 0x0604, 0x00004000);
	nv_wo32(chan, 0x0610, 0x00000001);
	nv_wo32(chan, 0x0618, 0x00040000);
	nv_wo32(chan, 0x061c, 0x00010000);
	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
		nv_wo32(chan, (i + 0), 0x10700ff9);
		nv_wo32(chan, (i + 4), 0x0436086c);
		nv_wo32(chan, (i + 8), 0x000c001b);
	}
	/* Floating point constants (1.0f, 2.0f, 0.5f, -1.0f) as raw bits. */
	nv_wo32(chan, 0x269c, 0x3f800000);
	nv_wo32(chan, 0x26b0, 0x3f800000);
	nv_wo32(chan, 0x26dc, 0x40000000);
	nv_wo32(chan, 0x26e0, 0x3f800000);
	nv_wo32(chan, 0x26e4, 0x3f000000);
	nv_wo32(chan, 0x26ec, 0x40000000);
	nv_wo32(chan, 0x26f0, 0x3f800000);
	nv_wo32(chan, 0x26f8, 0xbf800000);
	nv_wo32(chan, 0x2700, 0xbf800000);
	nv_wo32(chan, 0x3024, 0x000fe000);
	nv_wo32(chan, 0x30a0, 0x000003f8);
	nv_wo32(chan, 0x33fc, 0x002fe000);
	for (i = 0x341c; i <= 0x3438; i += 4)
		nv_wo32(chan, i, 0x001c527c);
	return 0;
}
/* Context class: NV2A-specific constructor, everything else shared
 * with the NV20 implementation. */
static struct nouveau_oclass
nv2a_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x2a),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv2a_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = nv20_graph_context_init,
		.fini = nv20_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
/*
 * Engine constructor: allocates the PGRAPH engine object plus the
 * context table gpuobj, then wires in the shared NV20 interrupt and
 * tile handlers with the NV2A context/sclass variants.
 */
static int
nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* Channel context pointer table: 32 entries of 4 bytes. */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;	/* PGRAPH unit bit in PMC */
	nv_subdev(priv)->intr = nv20_graph_intr;
	nv_engine(priv)->cclass = &nv2a_graph_cclass;
	nv_engine(priv)->sclass = nv25_graph_sclass;	/* object classes shared with NV25 */
	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
	return 0;
}
/* Engine class registered with the core; only the constructor is
 * NV2A-specific. */
struct nouveau_oclass
nv2a_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x2a),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv2a_graph_ctor,
		.dtor = nv20_graph_dtor,
		.init = nv20_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
| gpl-2.0 |
bestmjh47/android_kernel_a770k-stock | drivers/hid/hid-apple.c | 743 | 17642 | /*
* USB HID quirks support for Linux
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "hid-ids.h"
/* Per-device quirk bits, OR-ed into the HID device id driver_data. */
#define APPLE_RDESC_JIS		0x0001	/* fix up JIS keyboard report descriptor */
#define APPLE_IGNORE_MOUSE	0x0002
#define APPLE_HAS_FN		0x0004	/* device has an Fn key to remap */
#define APPLE_HIDDEV		0x0008
#define APPLE_ISO_KEYBOARD	0x0010	/* swap the two ISO-layout keys */
#define APPLE_MIGHTYMOUSE	0x0020
#define APPLE_INVERT_HWHEEL	0x0040
#define APPLE_IGNORE_HIDINPUT	0x0080
#define APPLE_NUMLOCK_EMULATION	0x0100

/* Flag on a translation table entry: the target is an F-key action. */
#define APPLE_FLAG_FKEY		0x01

/* fnmode: how the Fn modifier affects the top-row keys (see desc). */
static unsigned int fnmode = 1;
module_param(fnmode, uint, 0644);
MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, "
		"[1] = fkeyslast, 2 = fkeysfirst)");

/* iso_layout: whether to apply the ISO keyboard key swap. */
static unsigned int iso_layout = 1;
module_param(iso_layout, uint, 0644);
MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
		"(0 = disabled, [1] = enabled)");

/* Per-device driver state. */
struct apple_sc {
	unsigned long quirks;		/* APPLE_* bits for this device */
	unsigned int fn_on;		/* Fn key currently held */
	DECLARE_BITMAP(pressed_fn, KEY_CNT);		/* keys pressed while Fn held */
	DECLARE_BITMAP(pressed_numlock, KEY_CNT);	/* keys pressed under numlock emulation */
};

/* One key remapping: 'from' becomes 'to', optionally flagged. */
struct apple_key_translation {
	u16 from;
	u16 to;
	u8 flags;
};
/* Fn-key translations for MacBook Air (WELLSPRING4/4A) keyboards. */
static const struct apple_key_translation macbookair_fn_keys[] = {
	{ KEY_BACKSPACE, KEY_DELETE },
	{ KEY_ENTER,	KEY_INSERT },
	{ KEY_F1,	KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
	{ KEY_F2,	KEY_BRIGHTNESSUP,   APPLE_FLAG_FKEY },
	{ KEY_F3,	KEY_SCALE,          APPLE_FLAG_FKEY },
	{ KEY_F4,	KEY_DASHBOARD,      APPLE_FLAG_FKEY },
	{ KEY_F6,	KEY_PREVIOUSSONG,   APPLE_FLAG_FKEY },
	{ KEY_F7,	KEY_PLAYPAUSE,      APPLE_FLAG_FKEY },
	{ KEY_F8,	KEY_NEXTSONG,       APPLE_FLAG_FKEY },
	{ KEY_F9,	KEY_MUTE,           APPLE_FLAG_FKEY },
	{ KEY_F10,	KEY_VOLUMEDOWN,     APPLE_FLAG_FKEY },
	{ KEY_F11,	KEY_VOLUMEUP,       APPLE_FLAG_FKEY },
	{ KEY_F12,	KEY_EJECTCD,        APPLE_FLAG_FKEY },
	{ KEY_UP,	KEY_PAGEUP },
	{ KEY_DOWN,	KEY_PAGEDOWN },
	{ KEY_LEFT,	KEY_HOME },
	{ KEY_RIGHT,	KEY_END },
	{ }
};

/* Fn-key translations for most aluminum/unibody Apple keyboards. */
static const struct apple_key_translation apple_fn_keys[] = {
	{ KEY_BACKSPACE, KEY_DELETE },
	{ KEY_ENTER,	KEY_INSERT },
	{ KEY_F1,	KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
	{ KEY_F2,	KEY_BRIGHTNESSUP,   APPLE_FLAG_FKEY },
	{ KEY_F3,	KEY_SCALE,          APPLE_FLAG_FKEY },
	{ KEY_F4,	KEY_DASHBOARD,      APPLE_FLAG_FKEY },
	{ KEY_F5,	KEY_KBDILLUMDOWN,   APPLE_FLAG_FKEY },
	{ KEY_F6,	KEY_KBDILLUMUP,     APPLE_FLAG_FKEY },
	{ KEY_F7,	KEY_PREVIOUSSONG,   APPLE_FLAG_FKEY },
	{ KEY_F8,	KEY_PLAYPAUSE,      APPLE_FLAG_FKEY },
	{ KEY_F9,	KEY_NEXTSONG,       APPLE_FLAG_FKEY },
	{ KEY_F10,	KEY_MUTE,           APPLE_FLAG_FKEY },
	{ KEY_F11,	KEY_VOLUMEDOWN,     APPLE_FLAG_FKEY },
	{ KEY_F12,	KEY_VOLUMEUP,       APPLE_FLAG_FKEY },
	{ KEY_UP,	KEY_PAGEUP },
	{ KEY_DOWN,	KEY_PAGEDOWN },
	{ KEY_LEFT,	KEY_HOME },
	{ KEY_RIGHT,	KEY_END },
	{ }
};

/* Fn-key translations for older PowerBook/iBook keyboards. */
static const struct apple_key_translation powerbook_fn_keys[] = {
	{ KEY_BACKSPACE, KEY_DELETE },
	{ KEY_F1,	KEY_BRIGHTNESSDOWN,     APPLE_FLAG_FKEY },
	{ KEY_F2,	KEY_BRIGHTNESSUP,       APPLE_FLAG_FKEY },
	{ KEY_F3,	KEY_MUTE,               APPLE_FLAG_FKEY },
	{ KEY_F4,	KEY_VOLUMEDOWN,         APPLE_FLAG_FKEY },
	{ KEY_F5,	KEY_VOLUMEUP,           APPLE_FLAG_FKEY },
	{ KEY_F6,	KEY_NUMLOCK,            APPLE_FLAG_FKEY },
	{ KEY_F7,	KEY_SWITCHVIDEOMODE,    APPLE_FLAG_FKEY },
	{ KEY_F8,	KEY_KBDILLUMTOGGLE,     APPLE_FLAG_FKEY },
	{ KEY_F9,	KEY_KBDILLUMDOWN,       APPLE_FLAG_FKEY },
	{ KEY_F10,	KEY_KBDILLUMUP,         APPLE_FLAG_FKEY },
	{ KEY_UP,	KEY_PAGEUP },
	{ KEY_DOWN,	KEY_PAGEDOWN },
	{ KEY_LEFT,	KEY_HOME },
	{ KEY_RIGHT,	KEY_END },
	{ }
};

/*
 * Embedded-keypad emulation for keyboards without a physical numpad
 * (used only with the APPLE_NUMLOCK_EMULATION quirk while numlock is on).
 */
static const struct apple_key_translation powerbook_numlock_keys[] = {
	{ KEY_J,	KEY_KP1 },
	{ KEY_K,	KEY_KP2 },
	{ KEY_L,	KEY_KP3 },
	{ KEY_U,	KEY_KP4 },
	{ KEY_I,	KEY_KP5 },
	{ KEY_O,	KEY_KP6 },
	{ KEY_7,	KEY_KP7 },
	{ KEY_8,	KEY_KP8 },
	{ KEY_9,	KEY_KP9 },
	{ KEY_M,	KEY_KP0 },
	{ KEY_DOT,	KEY_KPDOT },
	{ KEY_SLASH,	KEY_KPPLUS },
	{ KEY_SEMICOLON, KEY_KPMINUS },
	{ KEY_P,	KEY_KPASTERISK },
	{ KEY_MINUS,	KEY_KPEQUAL },
	{ KEY_0,	KEY_KPSLASH },
	{ KEY_F6,	KEY_NUMLOCK },
	{ KEY_KPENTER,	KEY_KPENTER },
	{ KEY_BACKSPACE, KEY_BACKSPACE },
	{ }
};

/* ISO keyboards report GRAVE/102ND swapped; swap them back. */
static const struct apple_key_translation apple_iso_keyboard[] = {
	{ KEY_GRAVE,	KEY_102ND },
	{ KEY_102ND,	KEY_GRAVE },
	{ }
};
/*
 * Look up @from in a zero-terminated translation table.
 * Returns the matching entry, or NULL if @from has no translation.
 */
static const struct apple_key_translation *apple_find_translation(
		const struct apple_key_translation *table, u16 from)
{
	const struct apple_key_translation *entry = table;

	while (entry->from) {
		if (entry->from == from)
			return entry;
		entry++;
	}

	return NULL;
}
/*
 * Apply the Apple keycode quirks (Fn translation, numlock emulation,
 * ISO-layout swap) to a single input event.
 *
 * Returns 1 if the event was consumed/emitted here (the HID core must not
 * process it further), 0 to let the event pass through unchanged.
 */
static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
		struct hid_usage *usage, __s32 value)
{
	struct apple_sc *asc = hid_get_drvdata(hid);
	const struct apple_key_translation *trans, *table;

	if (usage->code == KEY_FN) {
		/* Track Fn state ourselves and still report the key. */
		asc->fn_on = !!value;
		input_event(input, usage->type, usage->code, value);
		return 1;
	}

	if (fnmode) {
		int do_translate;

		/* Pick the translation table by USB product id range:
		 * MacBook Air models get their own table; ids outside
		 * [0x21d, 0x300) are treated as PowerBook-style. */
		if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
				hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
			table = macbookair_fn_keys;
		else if (hid->product < 0x21d || hid->product >= 0x300)
			table = powerbook_fn_keys;
		else
			table = apple_fn_keys;

		trans = apple_find_translation (table, usage->code);

		if (trans) {
			/* A key that went down translated must come up
			 * translated, regardless of current Fn state. */
			if (test_bit(usage->code, asc->pressed_fn))
				do_translate = 1;
			else if (trans->flags & APPLE_FLAG_FKEY)
				do_translate = (fnmode == 2 && asc->fn_on) ||
					(fnmode == 1 && !asc->fn_on);
			else
				do_translate = asc->fn_on;

			if (do_translate) {
				if (value)
					set_bit(usage->code, asc->pressed_fn);
				else
					clear_bit(usage->code, asc->pressed_fn);

				input_event(input, usage->type, trans->to,
						value);

				return 1;
			}
		}

		/* Numlock emulation: active while the LED is on, or for keys
		 * that were pressed while it was on (to release them). */
		if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
				(test_bit(usage->code, asc->pressed_numlock) ||
				test_bit(LED_NUML, input->led))) {
			trans = apple_find_translation(powerbook_numlock_keys,
					usage->code);

			if (trans) {
				if (value)
					set_bit(usage->code,
							asc->pressed_numlock);
				else
					clear_bit(usage->code,
							asc->pressed_numlock);

				input_event(input, usage->type, trans->to,
						value);
			}

			return 1;
		}
	}

	if (iso_layout) {
		if (asc->quirks & APPLE_ISO_KEYBOARD) {
			trans = apple_find_translation(apple_iso_keyboard, usage->code);
			if (trans) {
				input_event(input, usage->type, trans->to, value);
				return 1;
			}
		}
	}

	return 0;
}
/*
 * HID ->event() hook: intercept events for quirk handling.
 * Returns 1 when the event was handled here, 0 to continue normal
 * hidinput processing.
 */
static int apple_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	struct apple_sc *asc = hid_get_drvdata(hdev);

	/* Only intervene for events that would reach hidinput. */
	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
			!usage->type)
		return 0;

	/* Mighty Mouse reports horizontal wheel inverted; flip the sign. */
	if ((asc->quirks & APPLE_INVERT_HWHEEL) &&
			usage->code == REL_HWHEEL) {
		input_event(field->hidinput->input, usage->type, usage->code,
				-value);
		return 1;
	}

	if ((asc->quirks & APPLE_HAS_FN) &&
			hidinput_apple_event(hdev, field->hidinput->input,
				usage, value))
		return 1;

	return 0;
}
/*
 * MacBook JIS keyboard has wrong logical maximum.
 * For devices with APPLE_RDESC_JIS, patch bytes 53 and 59 of the report
 * descriptor (both 0x65 on affected units) to 0xe7 before parsing.
 */
static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	struct apple_sc *asc = hid_get_drvdata(hdev);

	if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
			rdesc[53] == 0x65 && rdesc[59] == 0x65) {
		hid_info(hdev,
			 "fixing up MacBook JIS keyboard report descriptor\n");
		rdesc[53] = rdesc[59] = 0xe7;
	}
	return rdesc;
}
/*
 * Advertise every keycode any of our translation tables can emit, so the
 * input core accepts the translated events.
 */
static void apple_setup_input(struct input_dev *input)
{
	const struct apple_key_translation *tables[] = {
		apple_fn_keys,
		powerbook_fn_keys,
		powerbook_numlock_keys,
		apple_iso_keyboard,
	};
	const struct apple_key_translation *trans;
	unsigned int t;

	set_bit(KEY_NUMLOCK, input->keybit);

	/* Enable all needed keys */
	for (t = 0; t < sizeof(tables) / sizeof(tables[0]); t++)
		for (trans = tables[t]; trans->from; trans++)
			set_bit(trans->to, input->keybit);
}
/*
 * HID ->input_mapping() hook: map Apple's vendor-specific Fn usage
 * (HID_UP_CUSTOM | 0x0003) to KEY_FN and prepare the input device.
 * Returns 1 when the usage was mapped here, 0 for default mapping.
 */
static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
		/* The fn key on Apple USB keyboards */
		set_bit(EV_REP, hi->input->evbit);
		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
		apple_setup_input(hi->input);
		return 1;
	}

	/* we want the hid layer to go through standard path (set and ignore) */
	return 0;
}
/*
 * HID ->input_mapped() hook: remap Mighty Mouse controls after default
 * mapping — Z axis becomes the horizontal wheel, and buttons 1/2 are
 * swapped.
 */
static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	struct apple_sc *asc = hid_get_drvdata(hdev);

	if (asc->quirks & APPLE_MIGHTYMOUSE) {
		if (usage->hid == HID_GD_Z)
			hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
		else if (usage->code == BTN_1)
			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_2);
		else if (usage->code == BTN_2)
			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_1);
	}

	return 0;
}
/*
 * Probe: allocate per-device state, stash the quirks from the match
 * entry, parse reports and start the hardware with a connect mask
 * adjusted for hiddev/hidinput quirks.
 *
 * Returns 0 on success or a negative errno; on failure the allocated
 * state is freed via the goto cleanup path.
 */
static int apple_probe(struct hid_device *hdev,
		const struct hid_device_id *id)
{
	unsigned long quirks = id->driver_data;
	struct apple_sc *asc;
	unsigned int connect_mask = HID_CONNECT_DEFAULT;
	int ret;

	asc = kzalloc(sizeof(*asc), GFP_KERNEL);
	if (asc == NULL) {
		hid_err(hdev, "can't alloc apple descriptor\n");
		return -ENOMEM;
	}

	asc->quirks = quirks;

	/* Must be set before hid_parse(): apple_report_fixup() reads it. */
	hid_set_drvdata(hdev, asc);

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err_free;
	}

	if (quirks & APPLE_HIDDEV)
		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
	if (quirks & APPLE_IGNORE_HIDINPUT)
		connect_mask &= ~HID_CONNECT_HIDINPUT;

	ret = hid_hw_start(hdev, connect_mask);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err_free;
	}

	return 0;
err_free:
	kfree(asc);
	return ret;
}
/* Remove: stop the hardware, then release the per-device state. */
static void apple_remove(struct hid_device *hdev)
{
	struct apple_sc *asc = hid_get_drvdata(hdev);

	hid_hw_stop(hdev);
	kfree(asc);
}
/*
 * Supported devices and their quirk flags. The driver_data value of the
 * matching entry becomes apple_sc::quirks in apple_probe().
 */
static const struct hid_device_id apple_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL),
		.driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4),
		.driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE),
		.driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_RDESC_JIS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_RDESC_JIS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_RDESC_JIS },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_ISO_KEYBOARD },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS),
		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS),
		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
		.driver_data = APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
			APPLE_ISO_KEYBOARD },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },

	{ }
};
MODULE_DEVICE_TABLE(hid, apple_devices);
/* HID driver descriptor tying the hooks above to the device table. */
static struct hid_driver apple_driver = {
	.name = "apple",
	.id_table = apple_devices,
	.report_fixup = apple_report_fixup,
	.probe = apple_probe,
	.remove = apple_remove,
	.event = apple_event,
	.input_mapping = apple_input_mapping,
	.input_mapped = apple_input_mapped,
};
/* Module init: register the HID driver; returns 0 or a negative errno. */
static int __init apple_init(void)
{
	int ret;

	ret = hid_register_driver(&apple_driver);
	if (ret)
		pr_err("can't register apple driver\n");

	return ret;
}

/* Module exit: unregister the HID driver. */
static void __exit apple_exit(void)
{
	hid_unregister_driver(&apple_driver);
}

module_init(apple_init);
module_exit(apple_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
uclinux-cortexm/uclinux | drivers/gpu/drm/nouveau/nv17_tv_modes.c | 999 | 21838 | /*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
#include "nv17_tv.h"
/* Human-readable names for each TV norm, indexed by TV_NORM_*. */
char *nv17_tv_norm_names[NUM_TV_NORMS] = {
	[TV_NORM_PAL] = "PAL",
	[TV_NORM_PAL_M] = "PAL-M",
	[TV_NORM_PAL_N] = "PAL-N",
	[TV_NORM_PAL_NC] = "PAL-Nc",
	[TV_NORM_NTSC_M] = "NTSC-M",
	[TV_NORM_NTSC_J] = "NTSC-J",
	[TV_NORM_HD480I] = "hd480i",
	[TV_NORM_HD480P] = "hd480p",
	[TV_NORM_HD576I] = "hd576i",
	[TV_NORM_HD576P] = "hd576p",
	[TV_NORM_HD720P] = "hd720p",
	[TV_NORM_HD1080I] = "hd1080i"
};
/* TV standard specific parameters.
 *
 * Interlaced SD norms use the TV encoder path (TV_ENC_MODE) with a raw
 * 64-byte register dump; progressive/HD norms use the CRTC path
 * (CTV_ENC_MODE) with a DRM mode plus a ctv_regs dump.  The register
 * values were presumably captured from the proprietary driver — do not
 * edit them by hand.
 */
struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
	[TV_NORM_PAL] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },

	[TV_NORM_PAL_M] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_PAL_N] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_PAL_NC] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },

	[TV_NORM_NTSC_M] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_NTSC_J] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_HD480I] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_HD576I] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },

	[TV_NORM_HD480P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
						   720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
					      0x10160004, 0x10060005, 0x1006000c, 0x10060020,
					      0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
					      0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x70,
					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
				} } } },

	[TV_NORM_HD576P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
						   720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
					      0x10060001, 0x10060009, 0x10060026, 0x10060027,
					      0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x69,
					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
				} } } },

	[TV_NORM_HD720P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
						   1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
					      0x66b0021, 0x6004a, 0x1210626, 0x8170000,
					      0x70004, 0x70016, 0x70017, 0x40f0018,
					      0x702e8, 0x81702ed, 0xfff, 0xfff,
					      0xfff, 0xfff, 0xfff, 0xfff,
					      0xfff, 0xfff, 0xfff, 0x0,
					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
					      0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
				} } } },

	[TV_NORM_HD1080I] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
						   1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
						   | DRM_MODE_FLAG_INTERLACE) },
				.ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
					      0x8940028, 0x60054, 0xe80870, 0xbf70000,
					      0xbc70004, 0x70005, 0x70012, 0x70013,
					      0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
					      0x1c70237, 0x70238, 0x70244, 0x70245,
					      0x40f0246, 0x70462, 0x1f70464, 0x0,
					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
					      0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
				} } } }
};
/*
* The following is some guesswork on how the TV encoder flicker
* filter/rescaler works:
*
* It seems to use some sort of resampling filter, it is controlled
* through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they
* control the horizontal and vertical stage respectively, there is
* also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER,
* but they seem to do nothing. A rough guess might be that they could
* be used to independently control the filtering of each interlaced
* field, but I don't know how they are enabled. The whole filtering
* process seems to be disabled with bits 26:27 of PTV_200, but we
* aren't doing that.
*
* The layout of both register sets is the same:
*
* A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
* B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
*
* Each coefficient is stored in bits [31],[15:9] in two's complement
* format. They seem to be some kind of weights used in a low-pass
* filter. Both A and B coefficients are applied to the 14 nearest
* samples on each side (Listed from nearest to furthermost. They
* roughly cover 2 framebuffer pixels on each side). They are
* probably multiplied with some more hardwired weights before being
* used: B-coefficients are applied the same on both sides,
* A-coefficients are inverted before being applied to the opposite
* side.
*
* After all the hassle, I got the following formula by empirical
* means...
*/
#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
#define id1 (1LL << 8)
#define id2 (1LL << 16)
#define id3 (1LL << 24)
#define id4 (1LL << 32)
#define id5 (1LL << 48)
/*
 * Empirically-derived fixed-point coefficients for the flicker
 * filter/rescaler (see the long comment above).  For each filter tap i,
 * tv_setup_filter() evaluates a polynomial in i, the rescaling factor
 * rs, and the flicker setting; the id1..id5 factors bake the terms into
 * a common fixed-point scale.  Do not tweak individual values.
 */
static struct filter_params{
	int64_t k1;	/* constant term */
	int64_t ki;	/* coefficients of i, i^2, i^3 */
	int64_t ki2;
	int64_t ki3;
	int64_t kr;	/* terms multiplied by the rescaling factor */
	int64_t kir;
	int64_t ki2r;
	int64_t ki3r;
	int64_t kf;	/* terms multiplied by the flicker setting */
	int64_t kif;
	int64_t ki2f;
	int64_t ki3f;
	int64_t krf;	/* terms multiplied by flicker * rescaling factor */
	int64_t kirf;
	int64_t ki2rf;
	int64_t ki3rf;
} fparams[2][4] = {
	/* Horizontal filter parameters */
	{
		{64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
		 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
		 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
		 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
		{-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
		 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
		 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
		 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
		{-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
		 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
		 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
		 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
		{51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
		 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
		 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
		 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
	},

	/* Vertical filter parameters */
	{
		{67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
		 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
		 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
		 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
		{6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
		 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
		 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
		 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
		{-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
		 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
		 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
		 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
		{-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
		 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
		 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
		 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
	}
};
/*
 * Compute the horizontal and vertical flicker-filter coefficient banks
 * for the current mode, overscan and flicker settings, and store them in
 * tv_enc->state.{hfilter,vfilter}.  The polynomial evaluated per tap is
 * the empirical formula described in the comment above fparams.
 */
static void tv_setup_filter(struct drm_encoder *encoder)
{
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	struct drm_display_mode *mode = &encoder->crtc->mode;
	uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
				       &tv_enc->state.vfilter};
	int i, j, k;
	int32_t overscan = calc_overscan(tv_enc->overscan);
	/* Recenter flicker (0..100 UI range, presumably) around 0. */
	int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
	/* Per-axis rescaling factors: CRTC size over encoder size,
	 * scaled by overscan, in id3 fixed point. */
	uint64_t rs[] = {mode->hdisplay * id3,
			 mode->vdisplay * id3};

	do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
	do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);

	for (k = 0; k < 2; k++) {	/* k: 0 = horizontal, 1 = vertical */
		rs[k] = max((int64_t)rs[k], id2);

		for (j = 0; j < 4; j++) {	/* coefficient bank */
			struct filter_params *p = &fparams[k][j];

			for (i = 0; i < 7; i++) {	/* filter tap */
				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
					+ (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
					+ (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
					+ (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];

				/* Round, then pack into the hardware layout:
				 * sign in bit 31, magnitude in bits 15:9. */
				(*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
			}
		}
	}
}
/* Hardware state saving/restoring */
/* Read the 4x7 filter coefficient registers at @base into @regs. */
static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
{
	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
	int bank, tap;

	for (bank = 0; bank < 4; bank++)
		for (tap = 0; tap < 7; tap++)
			regs[bank][tap] = nv_read_ptv(dev, offsets[bank] + 4 * tap);
}
/* Write @regs back to the 4x7 filter coefficient registers at @base. */
static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
{
	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
	int bank, tap;

	for (bank = 0; bank < 4; bank++)
		for (tap = 0; tap < 7; tap++)
			nv_write_ptv(dev, offsets[bank] + 4 * tap, regs[bank][tap]);
}
/*
 * Snapshot the complete TV encoder hardware state into @state:
 * the 0x40 encoder registers, the three filter banks, and the PTV
 * control registers (the nv_save_ptv() argument is the hex register
 * offset as a token, e.g. 20c -> state->ptv_20c).
 */
void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
{
	int i;

	for (i = 0; i < 0x40; i++)
		state->tv_enc[i] = nv_read_tv_enc(dev, i);

	tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
	tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
	tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);

	nv_save_ptv(dev, state, 200);
	nv_save_ptv(dev, state, 204);
	nv_save_ptv(dev, state, 208);
	nv_save_ptv(dev, state, 20c);
	nv_save_ptv(dev, state, 304);
	nv_save_ptv(dev, state, 500);
	nv_save_ptv(dev, state, 504);
	nv_save_ptv(dev, state, 508);
	nv_save_ptv(dev, state, 600);
	nv_save_ptv(dev, state, 604);
	nv_save_ptv(dev, state, 608);
	nv_save_ptv(dev, state, 60c);
	nv_save_ptv(dev, state, 610);
	nv_save_ptv(dev, state, 614);
}
/*
 * Restore a previously saved TV encoder state (mirror of
 * nv17_tv_state_save), then pulse encoder register 0x3e to make the
 * hardware latch the new settings.
 */
void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
{
	int i;

	for (i = 0; i < 0x40; i++)
		nv_write_tv_enc(dev, i, state->tv_enc[i]);

	tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
	tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
	tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);

	nv_load_ptv(dev, state, 200);
	nv_load_ptv(dev, state, 204);
	nv_load_ptv(dev, state, 208);
	nv_load_ptv(dev, state, 20c);
	nv_load_ptv(dev, state, 304);
	nv_load_ptv(dev, state, 500);
	nv_load_ptv(dev, state, 504);
	nv_load_ptv(dev, state, 508);
	nv_load_ptv(dev, state, 600);
	nv_load_ptv(dev, state, 604);
	nv_load_ptv(dev, state, 608);
	nv_load_ptv(dev, state, 60c);
	nv_load_ptv(dev, state, 610);
	nv_load_ptv(dev, state, 614);

	/* This is required for some settings to kick in. */
	nv_write_tv_enc(dev, 0x3e, 1);
	nv_write_tv_enc(dev, 0x3e, 0);
}
/* Timings similar to the ones the blob sets.
 * Zero-terminated list of CRTC modes offered on the TV connector;
 * pixel clock is 0 (filled in later), small modes use doublescan +
 * half-rate clock flags. */
struct drm_display_mode nv17_tv_modes[] = {
	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
		   320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
		   320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
		   400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
		   640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
		   720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
		   720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
		   800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
		   1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{}
};
/*
 * Program the output-connector routing (PTV_204 / encoder reg 0x7) for
 * the selected subconnector, and apply the saturation/hue properties
 * on top of the norm's base values.
 */
void nv17_tv_update_properties(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_state *regs = &tv_enc->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	/* Explicit user selection wins over the auto-detected one. */
	int subconnector = tv_enc->select_subconnector ?
				tv_enc->select_subconnector :
				tv_enc->subconnector;

	switch (subconnector) {
	case DRM_MODE_SUBCONNECTOR_Composite:
	{
		regs->ptv_204 = 0x2;

		/* The composite connector may be found on either pin. */
		if (tv_enc->pin_mask & 0x4)
			regs->ptv_204 |= 0x010000;
		else if (tv_enc->pin_mask & 0x2)
			regs->ptv_204 |= 0x100000;
		else
			regs->ptv_204 |= 0x110000;

		regs->tv_enc[0x7] = 0x10;
		break;
	}
	case DRM_MODE_SUBCONNECTOR_SVIDEO:
		regs->ptv_204 = 0x11012;
		regs->tv_enc[0x7] = 0x18;
		break;
	case DRM_MODE_SUBCONNECTOR_Component:
		regs->ptv_204 = 0x111333;
		regs->tv_enc[0x7] = 0x14;
		break;
	case DRM_MODE_SUBCONNECTOR_SCART:
		regs->ptv_204 = 0x111012;
		regs->tv_enc[0x7] = 0x18;
		break;
	}

	/* Scale the norm's chroma amplitudes by the saturation property. */
	regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
					 tv_enc->saturation);
	regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
					 tv_enc->saturation);
	regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;

	nv_load_ptv(dev, regs, 204);
	nv_load_tv_enc(dev, regs, 7);
	nv_load_tv_enc(dev, regs, 20);
	nv_load_tv_enc(dev, regs, 22);
	nv_load_tv_enc(dev, regs, 25);
}
/*
 * Recompute the overscan-dependent rescaler setting and reload the
 * scaler register and the three filter banks into the hardware.
 */
void nv17_tv_update_rescaler(struct drm_encoder *encoder)
{
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_state *state = &tv_enc->state;
	struct drm_device *dev = encoder->dev;
	int overscan = calc_overscan(tv_enc->overscan);

	state->ptv_208 = 0x40 | (overscan << 8);

	/* Refresh the filter coefficient tables in the state struct. */
	tv_setup_filter(encoder);

	nv_load_ptv(dev, state, 208);
	tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
	tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
	tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
}
/*
 * Configure the RAMDAC flat-panel rescaler so the CRTC mode is scaled
 * (with overscan margins) into the component-TV output mode.
 */
void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
	struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
	int overscan, hmargin, vmargin, hratio, vratio;
	/* The rescaler doesn't do the right thing for interlaced modes. */
	if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
		overscan = 100;
	else
		overscan = tv_enc->overscan;
	/* Nominal margins: centre the CRTC mode in the output mode, then
	 * shrink them towards 1/20th of the display per the overscan
	 * property (100 keeps the full margin). */
	hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
	vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
	hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
			      overscan);
	vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
			      overscan);
	/* 11.?-fixed-point scale factors; note '&' binds looser than '/',
	 * so vratio is (v * 0x800 / span) & ~3, i.e. aligned down to a
	 * multiple of 4. */
	hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
	vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
	regs->fp_horiz_regs[FP_VALID_START] = hmargin;
	regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
	regs->fp_vert_regs[FP_VALID_START] = vmargin;
	regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
	regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
		XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
		NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
		XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
	/* Push the shadow registers to the hardware. */
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
		      regs->fp_horiz_regs[FP_VALID_START]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
		      regs->fp_horiz_regs[FP_VALID_END]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
		      regs->fp_vert_regs[FP_VALID_START]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
		      regs->fp_vert_regs[FP_VALID_END]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
}
| gpl-2.0 |
arnoldthebat/linux-stable | drivers/oprofile/nmi_timer_int.c | 1767 | 3554 | /**
* @file nmi_timer_int.c
*
* @remark Copyright 2011 Advanced Micro Devices, Inc.
*
* @author Robert Richter <robert.richter@amd.com>
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>
#ifdef CONFIG_OPROFILE_NMI_TIMER
/* Per-CPU sampling event backing the NMI profiling timer. */
static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
/* Nonzero while a profiling session is running; gates per-cpu enable. */
static int ctr_running;

/* CPU-cycle counter used as an (NMI-capable) periodic timer; events are
 * created pinned and disabled, and enabled explicitly per cpu. */
static struct perf_event_attr nmi_timer_attr = {
	.type           = PERF_TYPE_HARDWARE,
	.config         = PERF_COUNT_HW_CPU_CYCLES,
	.size           = sizeof(struct perf_event_attr),
	.pinned         = 1,
	.disabled       = 1,
};
/*
 * Sample callback: hand the interrupted register state to oprofile.
 * Clearing hw.interrupts keeps the perf core from throttling the event.
 */
static void nmi_timer_callback(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	event->hw.interrupts = 0; /* don't throttle interrupts */
	oprofile_add_sample(regs, 0);
}
/*
 * Lazily create the sampling event for @cpu and enable it if a
 * profiling session is running.  Returns 0 or the PTR_ERR() from
 * perf_event_create_kernel_counter().
 */
static int nmi_timer_start_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
	if (!event) {
		event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
							 nmi_timer_callback, NULL);
		if (IS_ERR(event))
			return PTR_ERR(event);
		per_cpu(nmi_timer_events, cpu) = event;
	}
	/* only count while profiling is active */
	if (event && ctr_running)
		perf_event_enable(event);
	return 0;
}
/* Disable (but keep allocated) the sampling event for @cpu. */
static void nmi_timer_stop_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
	if (event && ctr_running)
		perf_event_disable(event);
}
/*
 * CPU hotplug callback: (re)start sampling on CPUs coming online and
 * stop it on CPUs about to go down.  Errors from nmi_timer_start_cpu()
 * are deliberately ignored here; we always return NOTIFY_DONE.
 */
static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
				  void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		nmi_timer_start_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		nmi_timer_stop_cpu(cpu);
		break;
	}
	return NOTIFY_DONE;
}
/* Hotplug notifier keeping per-cpu events in sync with online CPUs. */
static struct notifier_block nmi_timer_cpu_nb = {
	.notifier_call = nmi_timer_cpu_notifier
};
/*
 * oprofile "start" hook: mark the session running *before* the loop so
 * nmi_timer_start_cpu() actually enables each event.  Hotplug is held
 * off so the online-cpu set is stable.
 */
static int nmi_timer_start(void)
{
	int cpu;
	get_online_cpus();
	ctr_running = 1;
	for_each_online_cpu(cpu)
		nmi_timer_start_cpu(cpu);
	put_online_cpus();
	return 0;
}
/*
 * oprofile "stop" hook: disable all per-cpu events, then clear
 * ctr_running (events are disabled while the flag is still set so
 * nmi_timer_stop_cpu()'s guard passes).
 */
static void nmi_timer_stop(void)
{
	int cpu;
	get_online_cpus();
	for_each_online_cpu(cpu)
		nmi_timer_stop_cpu(cpu);
	ctr_running = 0;
	put_online_cpus();
}
/*
 * Tear everything down: unregister the hotplug notifier and release
 * every per-cpu event.  The notifier-registration lock is held across
 * the loop so hotplug cannot race with the release.
 */
static void nmi_timer_shutdown(void)
{
	struct perf_event *event;
	int cpu;
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&nmi_timer_cpu_nb);
	for_each_possible_cpu(cpu) {
		event = per_cpu(nmi_timer_events, cpu);
		if (!event)
			continue;
		perf_event_disable(event);
		/* clear the slot before releasing to avoid dangling use */
		per_cpu(nmi_timer_events, cpu) = NULL;
		perf_event_release_kernel(event);
	}
	cpu_notifier_register_done();
}
/*
 * oprofile "setup" hook: derive the sample period (one sample per
 * tick), register the hotplug notifier and create an event on every
 * online cpu.  On a partial failure the notifier lock is dropped first
 * because nmi_timer_shutdown() re-takes it.
 */
static int nmi_timer_setup(void)
{
	int cpu, err;
	u64 period;
	/* clock cycles per tick: */
	period = (u64)cpu_khz * 1000;
	do_div(period, HZ);
	nmi_timer_attr.sample_period = period;
	cpu_notifier_register_begin();
	err = __register_cpu_notifier(&nmi_timer_cpu_nb);
	if (err)
		goto out;
	/* can't attach events to offline cpus: */
	for_each_online_cpu(cpu) {
		err = nmi_timer_start_cpu(cpu);
		if (err) {
			cpu_notifier_register_done();
			nmi_timer_shutdown();
			return err;
		}
	}
out:
	cpu_notifier_register_done();
	return err;
}
/*
 * Probe entry point: do a full setup/shutdown round-trip to verify the
 * perf-based timer works on this system, then install the oprofile
 * operations.  Real allocation happens again in ops->setup.
 */
int __init op_nmi_timer_init(struct oprofile_operations *ops)
{
	int err = 0;
	err = nmi_timer_setup();
	if (err)
		return err;
	nmi_timer_shutdown();		/* only check, don't alloc */
	ops->create_files	= NULL;
	ops->setup		= nmi_timer_setup;
	ops->shutdown		= nmi_timer_shutdown;
	ops->start		= nmi_timer_start;
	ops->stop		= nmi_timer_stop;
	ops->cpu_type		= "timer";
	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
	return 0;
}
#endif
| gpl-2.0 |
kbukin1/pnotify-linux-3.18.9 | drivers/oprofile/nmi_timer_int.c | 1767 | 3554 | /**
* @file nmi_timer_int.c
*
* @remark Copyright 2011 Advanced Micro Devices, Inc.
*
* @author Robert Richter <robert.richter@amd.com>
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>
#ifdef CONFIG_OPROFILE_NMI_TIMER
/* One sampling perf event per CPU. */
static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
/* Set while profiling runs; guards per-cpu enable/disable. */
static int ctr_running;

/* Cycle counter used as NMI timer: pinned, initially disabled. */
static struct perf_event_attr nmi_timer_attr = {
	.type           = PERF_TYPE_HARDWARE,
	.config         = PERF_COUNT_HW_CPU_CYCLES,
	.size           = sizeof(struct perf_event_attr),
	.pinned         = 1,
	.disabled       = 1,
};
/* Feed each sample's pt_regs to oprofile; reset the interrupt count so
 * perf never throttles this event. */
static void nmi_timer_callback(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	event->hw.interrupts = 0; /* don't throttle interrupts */
	oprofile_add_sample(regs, 0);
}
/* Create @cpu's event on first use; enable it if profiling is active.
 * Returns 0 or a PTR_ERR() code. */
static int nmi_timer_start_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
	if (!event) {
		event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
							 nmi_timer_callback, NULL);
		if (IS_ERR(event))
			return PTR_ERR(event);
		per_cpu(nmi_timer_events, cpu) = event;
	}
	if (event && ctr_running)
		perf_event_enable(event);
	return 0;
}
/* Disable @cpu's event without freeing it. */
static void nmi_timer_stop_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);
	if (event && ctr_running)
		perf_event_disable(event);
}
/* Hotplug callback: track CPUs entering/leaving the online set.
 * start_cpu() errors are intentionally ignored. */
static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
				  void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		nmi_timer_start_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		nmi_timer_stop_cpu(cpu);
		break;
	}
	return NOTIFY_DONE;
}
/* Notifier block for the hotplug callback above. */
static struct notifier_block nmi_timer_cpu_nb = {
	.notifier_call = nmi_timer_cpu_notifier
};
/* Start profiling: set ctr_running first so the per-cpu helper enables
 * each event; hotplug is blocked for the duration. */
static int nmi_timer_start(void)
{
	int cpu;
	get_online_cpus();
	ctr_running = 1;
	for_each_online_cpu(cpu)
		nmi_timer_start_cpu(cpu);
	put_online_cpus();
	return 0;
}
/* Stop profiling: disable events while ctr_running is still set (the
 * per-cpu helper checks it), then clear the flag. */
static void nmi_timer_stop(void)
{
	int cpu;
	get_online_cpus();
	for_each_online_cpu(cpu)
		nmi_timer_stop_cpu(cpu);
	ctr_running = 0;
	put_online_cpus();
}
/* Unregister the hotplug notifier and release every per-cpu event,
 * all under the notifier-registration lock to exclude hotplug. */
static void nmi_timer_shutdown(void)
{
	struct perf_event *event;
	int cpu;
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&nmi_timer_cpu_nb);
	for_each_possible_cpu(cpu) {
		event = per_cpu(nmi_timer_events, cpu);
		if (!event)
			continue;
		perf_event_disable(event);
		per_cpu(nmi_timer_events, cpu) = NULL;
		perf_event_release_kernel(event);
	}
	cpu_notifier_register_done();
}
/* Compute one-sample-per-tick period, register the hotplug notifier and
 * create an event per online cpu.  The notifier lock is dropped before
 * calling nmi_timer_shutdown(), which re-acquires it. */
static int nmi_timer_setup(void)
{
	int cpu, err;
	u64 period;
	/* clock cycles per tick: */
	period = (u64)cpu_khz * 1000;
	do_div(period, HZ);
	nmi_timer_attr.sample_period = period;
	cpu_notifier_register_begin();
	err = __register_cpu_notifier(&nmi_timer_cpu_nb);
	if (err)
		goto out;
	/* can't attach events to offline cpus: */
	for_each_online_cpu(cpu) {
		err = nmi_timer_start_cpu(cpu);
		if (err) {
			cpu_notifier_register_done();
			nmi_timer_shutdown();
			return err;
		}
	}
out:
	cpu_notifier_register_done();
	return err;
}
/* Verify the timer works (setup + immediate shutdown), then publish the
 * oprofile operation callbacks. */
int __init op_nmi_timer_init(struct oprofile_operations *ops)
{
	int err = 0;
	err = nmi_timer_setup();
	if (err)
		return err;
	nmi_timer_shutdown();		/* only check, don't alloc */
	ops->create_files	= NULL;
	ops->setup		= nmi_timer_setup;
	ops->shutdown		= nmi_timer_shutdown;
	ops->start		= nmi_timer_start;
	ops->stop		= nmi_timer_stop;
	ops->cpu_type		= "timer";
	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
	return 0;
}
#endif
| gpl-2.0 |
troth/linux-kernel | arch/arm/mach-s3c24xx/s3c2442.c | 2279 | 4535 | /* linux/arch/arm/mach-s3c2442/s3c2442.c
*
* Copyright (c) 2004-2005 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2442 core and lock support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <mach/regs-clock.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
#include "common.h"
/* S3C2442 extended clock support */
/*
 * Round @rate to a frequency the CAMIF UPLL divider can produce: the
 * parent rate itself, parent/3 (special divider), or parent divided by
 * an even value between 2 and 32.
 */
static unsigned long s3c2442_camif_upll_round(struct clk *clk,
					      unsigned long rate)
{
	unsigned long parent = clk_get_rate(clk->parent);
	int half_div;

	if (rate > parent)
		return parent;

	/* the hardware has a dedicated divide-by-3 setting */
	if (parent / rate == 3)
		return parent / 3;

	/* otherwise only even dividers 2..32 are available
	 * (note: the +/- 1 rounding of the divisor is intentionally
	 * omitted, as in the original) */
	half_div = (parent / rate) / 2;
	if (half_div < 1)
		half_div = 1;
	if (half_div > 16)
		half_div = 16;

	return parent / (half_div * 2);
}
/*
 * Program the CAMDIVN register for the requested CAMIF clock rate:
 * bypass, the dedicated divide-by-3 path, or an even divider.
 */
static int s3c2442_camif_upll_setrate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = clk_get_rate(clk->parent);
	unsigned long camdivn =  __raw_readl(S3C2440_CAMDIVN);

	/* snap to a rate the divider can actually produce */
	rate = s3c2442_camif_upll_round(clk, rate);

	camdivn &= ~S3C2442_CAMDIVN_CAMCLK_DIV3;

	if (rate == parent_rate) {
		/* bypass: no division */
		camdivn &= ~S3C2440_CAMDIVN_CAMCLK_SEL;
	} else if ((parent_rate / rate) == 3) {
		/* dedicated /3 path */
		camdivn |= S3C2440_CAMDIVN_CAMCLK_SEL;
		camdivn |= S3C2442_CAMDIVN_CAMCLK_DIV3;
	} else {
		/* even divider: register holds (div/2)-1 */
		camdivn &= ~S3C2440_CAMDIVN_CAMCLK_MASK;
		camdivn |= S3C2440_CAMDIVN_CAMCLK_SEL;
		camdivn |= (((parent_rate / rate) / 2) - 1);
	}

	__raw_writel(camdivn, S3C2440_CAMDIVN);

	return 0;
}
/* Extra S3C2442 clocks */
/* CAMIF gate clock (HCLK-parented; parent set in s3c2442_clk_add). */
static struct clk s3c2442_clk_cam = {
	.name		= "camif",
	.id		= -1,
	.enable		= s3c2410_clkcon_enable,
	.ctrlbit	= S3C2440_CLKCON_CAMERA,
};

/* UPLL-derived CAMIF clock with the divider ops defined above. */
static struct clk s3c2442_clk_cam_upll = {
	.name		= "camif-upll",
	.id		= -1,
	.ops		= &(struct clk_ops) {
		.set_rate	= s3c2442_camif_upll_setrate,
		.round_rate	= s3c2442_camif_upll_round,
	},
};
/*
 * Subsystem-interface hook: register the extra S3C2442 camera clocks
 * once the core clocks ("pclk", "hclk", "upll") are available.
 *
 * Fix vs. original: drop clk_get() references with clk_put() on the
 * error path, and release "pclk" again since it is only probed for
 * availability and never used.  "hclk"/"upll" stay referenced as the
 * parents of the new clocks.
 */
static int s3c2442_clk_add(struct device *dev, struct subsys_interface *sif)
{
	struct clk *clock_upll;
	struct clk *clock_h;
	struct clk *clock_p;

	clock_p = clk_get(NULL, "pclk");
	clock_h = clk_get(NULL, "hclk");
	clock_upll = clk_get(NULL, "upll");

	if (IS_ERR(clock_p) || IS_ERR(clock_h) || IS_ERR(clock_upll)) {
		printk(KERN_ERR "S3C2442: Failed to get parent clocks\n");
		if (!IS_ERR(clock_p))
			clk_put(clock_p);
		if (!IS_ERR(clock_h))
			clk_put(clock_h);
		if (!IS_ERR(clock_upll))
			clk_put(clock_upll);
		return -EINVAL;
	}

	s3c2442_clk_cam.parent = clock_h;
	s3c2442_clk_cam_upll.parent = clock_upll;

	s3c24xx_register_clock(&s3c2442_clk_cam);
	s3c24xx_register_clock(&s3c2442_clk_cam_upll);

	/* gate the camera clock until a consumer enables it */
	clk_disable(&s3c2442_clk_cam);

	clk_put(clock_p);	/* pclk was only checked, never used */

	return 0;
}
/* Run s3c2442_clk_add() for each device on the s3c2442 subsystem. */
static struct subsys_interface s3c2442_clk_interface = {
	.name		= "s3c2442_clk",
	.subsys		= &s3c2442_subsys,
	.add_dev	= s3c2442_clk_add,
};
/* Hook the clock interface up at arch_initcall time. */
static __init int s3c2442_clk_init(void)
{
	return subsys_interface_register(&s3c2442_clk_interface);
}

arch_initcall(s3c2442_clk_init);
/* Anchor device on the s3c2442 subsystem bus. */
static struct device s3c2442_dev = {
	.bus		= &s3c2442_subsys,
};
/*
 * Core S3C2442 initialisation: register power-management syscore ops
 * and the subsystem anchor device that drivers attach to.
 *
 * Fix vs. original: the banner printk had no log level; use KERN_INFO.
 */
int __init s3c2442_init(void)
{
	printk(KERN_INFO "S3C2442: Initialising architecture\n");

#ifdef CONFIG_PM
	register_syscore_ops(&s3c2410_pm_syscore_ops);
	register_syscore_ops(&s3c24xx_irq_syscore_ops);
#endif
	register_syscore_ops(&s3c244x_pm_syscore_ops);

	return device_register(&s3c2442_dev);
}
/*
 * Map the common s3c244x IO and select the 1-down GPIO pull helpers
 * used by this SoC revision.
 */
void __init s3c2442_map_io(void)
{
	s3c244x_map_io();

	s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1down;
	s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1down;
}
| gpl-2.0 |
VanirAOSP/kernel_samsung_golden | arch/arm/mach-imx/mx31lite-db.c | 2279 | 5233 | /*
* LogicPD i.MX31 SOM-LV development board support
*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
*
* based on code for other MX31 boards,
*
* Copyright 2005-2007 Freescale Semiconductor
* Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
* Copyright (C) 2009 Valentin Longchamp, EPFL Mobots group
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx3.h>
#include <mach/board-mx31lite.h>
#include "devices-imx31.h"
/*
* This file contains board-specific initialization routines for the
* LogicPD i.MX31 SOM-LV development board, aka 'LiteKit'.
* If you design an own baseboard for the module, use this file as base
* for support code.
*/
/* IOMUX configuration for every pin the baseboard uses; applied in one
 * go by mx31lite_db_init(). */
static unsigned int litekit_db_board_pins[] __initdata = {
	/* UART1 */
	MX31_PIN_CTS1__CTS1,
	MX31_PIN_RTS1__RTS1,
	MX31_PIN_TXD1__TXD1,
	MX31_PIN_RXD1__RXD1,
	/* SPI 0 */
	MX31_PIN_CSPI1_SCLK__SCLK,
	MX31_PIN_CSPI1_MOSI__MOSI,
	MX31_PIN_CSPI1_MISO__MISO,
	MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
	MX31_PIN_CSPI1_SS0__SS0,
	MX31_PIN_CSPI1_SS1__SS1,
	MX31_PIN_CSPI1_SS2__SS2,
	/* SDHC1 */
	MX31_PIN_SD1_DATA0__SD1_DATA0,
	MX31_PIN_SD1_DATA1__SD1_DATA1,
	MX31_PIN_SD1_DATA2__SD1_DATA2,
	MX31_PIN_SD1_DATA3__SD1_DATA3,
	MX31_PIN_SD1_CLK__SD1_CLK,
	MX31_PIN_SD1_CMD__SD1_CMD,
};
/* UART */
/* UART1 with hardware RTS/CTS flow control. */
static const struct imxuart_platform_data uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

/* MMC */
/* Card-detect and write-protect GPIO numbers, resolved in mxc_mmc1_init(). */
static int gpio_det, gpio_wp;

/* Pad configuration shared by all SD1 pins. */
#define MMC_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
		     PAD_CTL_ODE_CMOS)
/* Read the write-protect switch state from its GPIO. */
static int mxc_mmc1_get_ro(struct device *dev)
{
	return gpio_get_value(IOMUX_TO_GPIO(MX31_PIN_GPIO1_6));
}
/*
 * Set up SD1: configure the pad pull-ups, claim the card-detect and
 * write-protect GPIOs as inputs, and hook @detect_irq to the detect
 * pin (both edges).  Resources are unwound with goto on failure.
 */
static int mxc_mmc1_init(struct device *dev,
			 irq_handler_t detect_irq, void *data)
{
	int ret;
	gpio_det = IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1);
	gpio_wp = IOMUX_TO_GPIO(MX31_PIN_GPIO1_6);

	/* data/cmd lines need 100K pull-ups; the clock pin does not */
	mxc_iomux_set_pad(MX31_PIN_SD1_DATA0,
			  MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU);
	mxc_iomux_set_pad(MX31_PIN_SD1_DATA1,
			  MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU);
	mxc_iomux_set_pad(MX31_PIN_SD1_DATA2,
			  MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU);
	mxc_iomux_set_pad(MX31_PIN_SD1_DATA3,
			  MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU);
	mxc_iomux_set_pad(MX31_PIN_SD1_CMD,
			  MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU);
	mxc_iomux_set_pad(MX31_PIN_SD1_CLK, MMC_PAD_CFG);

	ret = gpio_request(gpio_det, "MMC detect");
	if (ret)
		return ret;

	ret = gpio_request(gpio_wp, "MMC w/p");
	if (ret)
		goto exit_free_det;

	gpio_direction_input(gpio_det);
	gpio_direction_input(gpio_wp);

	/* fire on both insert and removal */
	ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_DCD_DCE1), detect_irq,
			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			  "MMC detect", data);
	if (ret)
		goto exit_free_wp;

	return 0;

exit_free_wp:
	gpio_free(gpio_wp);

exit_free_det:
	gpio_free(gpio_det);

	return ret;
}
/*
 * Teardown counterpart of mxc_mmc1_init().
 *
 * Fix vs. original: release the card-detect interrupt *before* freeing
 * the GPIOs, so the detect handler cannot fire against a GPIO that has
 * already been released.
 */
static void mxc_mmc1_exit(struct device *dev, void *data)
{
	free_irq(IOMUX_TO_IRQ(MX31_PIN_DCD_DCE1), data);
	gpio_free(gpio_det);
	gpio_free(gpio_wp);
}
/* SD1 platform hooks wired to the helpers above. */
static const struct imxmmc_platform_data mmc_pdata __initconst = {
	.get_ro	= mxc_mmc1_get_ro,
	.init	= mxc_mmc1_init,
	.exit	= mxc_mmc1_exit,
};

/* SPI */
/* Use the controller's three internal chip selects on CSPI1. */
static int spi_internal_chipselect[] = {
	MXC_SPI_CS(0),
	MXC_SPI_CS(1),
	MXC_SPI_CS(2),
};

static const struct spi_imx_master spi0_pdata __initconst = {
	.chipselect	= spi_internal_chipselect,
	.num_chipselect	= ARRAY_SIZE(spi_internal_chipselect),
};
/* GPIO LEDs */
/* Two active-low LEDs, both off by default. */
static struct gpio_led litekit_leds[] = {
	{
		.name           = "GPIO0",
		.gpio           = IOMUX_TO_GPIO(MX31_PIN_COMPARE),
		.active_low     = 1,
		.default_state  = LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name           = "GPIO1",
		.gpio           = IOMUX_TO_GPIO(MX31_PIN_CAPTURE),
		.active_low     = 1,
		.default_state  = LEDS_GPIO_DEFSTATE_OFF,
	}
};

static struct gpio_led_platform_data litekit_led_platform_data = {
	.leds           = litekit_leds,
	.num_leds       = ARRAY_SIZE(litekit_leds),
};

/* leds-gpio platform device carrying the table above. */
static struct platform_device litekit_led_device = {
	.name   = "leds-gpio",
	.id     = -1,
	.dev    = {
		.platform_data  = &litekit_led_platform_data,
	},
};
/*
 * Baseboard init: apply the pin mux table, then register the UART,
 * SD/MMC, SPI, LED, watchdog and RTC devices.
 */
void __init mx31lite_db_init(void)
{
	mxc_iomux_setup_multiple_pins(litekit_db_board_pins,
				      ARRAY_SIZE(litekit_db_board_pins),
				      "development board pins");
	imx31_add_imx_uart0(&uart_pdata);
	imx31_add_mxc_mmc(0, &mmc_pdata);
	imx31_add_spi_imx0(&spi0_pdata);
	platform_device_register(&litekit_led_device);
	imx31_add_imx2_wdt(NULL);
	imx31_add_mxc_rtc(NULL);
}
| gpl-2.0 |
aloksinha2001/picuntu-3.0.8-alok | drivers/mtd/nand/h1910.c | 2791 | 4489 | /*
* drivers/mtd/nand/h1910.c
*
* Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
*
* Derived from drivers/mtd/nand/edb7312.c
* Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Overview:
* This is a device driver for the NAND flash device found on the
* iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
* a 128Mibit (16MiB x 8 bits) NAND flash device.
*/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */
#include <asm/sizes.h>
#include <mach/h1900-gpio.h>
#include <mach/ipaq.h>
/*
* MTD structure for EDB7312 board
*/
static struct mtd_info *h1910_nand_mtd = NULL;
/*
* Module stuff
*/
/*
* Define static partitions for flash device
*/
/* Single static partition spanning the whole 16MiB device; used when
 * no command-line partitions are given.  (GNU-style designated
 * initializers kept as-is.) */
static struct mtd_partition partition_info[] = {
	{name:"h1910 NAND Flash",
	 offset:0,
	 size:16 * 1024 * 1024}
};

#define NUM_PARTITIONS 1
/*
* hardware specific access to control-lines
*
* NAND_NCE: bit 0 - don't care
* NAND_CLE: bit 1 - address bit 2
* NAND_ALE: bit 2 - address bit 3
*/
/* Issue a command/address byte; CLE/ALE are encoded into the write
 * address per the bit mapping documented above. */
static void h1910_hwcontrol(struct mtd_info *mtd, int cmd,
			    unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (cmd != NAND_CMD_NONE)
		writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1));
}
/*
* read device ready pin
*/
#if 0
static int h1910_device_ready(struct mtd_info *mtd)
{
return (GPLR(55) & GPIO_bit(55));
}
#endif
/*
* Main initialization routine
*/
/*
 * Map the NAND window, allocate the mtd_info + nand_chip pair, wire up
 * the board callbacks, scan for the chip and register its partitions.
 *
 * Fixes vs. original: kzalloc replaces kmalloc + two memset(0) calls,
 * NULL replaces 0 for pointer initializers, and the stale "15 us"
 * comment now matches the actual chip_delay of 50.
 */
static int __init h1910_init(void)
{
	struct nand_chip *this;
	const char *part_type = NULL;
	int mtd_parts_nb = 0;
	struct mtd_partition *mtd_parts = NULL;
	void __iomem *nandaddr;

	if (!machine_is_h1900())
		return -ENODEV;

	nandaddr = ioremap(0x08000000, 0x1000);
	if (!nandaddr) {
		printk("Failed to ioremap nand flash.\n");
		return -ENOMEM;
	}

	/* Allocate (zeroed) memory for MTD device structure and private data */
	h1910_nand_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
	if (!h1910_nand_mtd) {
		printk("Unable to allocate h1910 NAND MTD device structure.\n");
		iounmap((void *)nandaddr);
		return -ENOMEM;
	}

	/* Get pointer to private data */
	this = (struct nand_chip *)(&h1910_nand_mtd[1]);

	/* Link the private data with the MTD structure */
	h1910_nand_mtd->priv = this;
	h1910_nand_mtd->owner = THIS_MODULE;

	/*
	 * Enable VPEN
	 */
	GPSR(37) = GPIO_bit(37);

	/* insert callbacks */
	this->IO_ADDR_R = nandaddr;
	this->IO_ADDR_W = nandaddr;
	this->cmd_ctrl = h1910_hwcontrol;
	this->dev_ready = NULL;	/* unknown whether that was correct or not so we will just do it like this */
	/* 50 us command delay time */
	this->chip_delay = 50;
	this->ecc.mode = NAND_ECC_SOFT;
	this->options = NAND_NO_AUTOINCR;

	/* Scan to find existence of the device */
	if (nand_scan(h1910_nand_mtd, 1)) {
		printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
		kfree(h1910_nand_mtd);
		iounmap((void *)nandaddr);
		return -ENXIO;
	}

#ifdef CONFIG_MTD_CMDLINE_PARTS
	mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
	if (mtd_parts_nb > 0)
		part_type = "command line";
	else
		mtd_parts_nb = 0;
#endif
	if (mtd_parts_nb == 0) {
		mtd_parts = partition_info;
		mtd_parts_nb = NUM_PARTITIONS;
		part_type = "static";
	}

	/* Register the partitions */
	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
	mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);

	/* Return happy */
	return 0;
}
module_init(h1910_init);
/*
* Clean up routine
*/
/* Unregister the device, unmap the NAND window and free the combined
 * mtd_info + nand_chip allocation. */
static void __exit h1910_cleanup(void)
{
	struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];

	/* Release resources, unregister device */
	nand_release(h1910_nand_mtd);

	/* Release io resource */
	iounmap((void *)this->IO_ADDR_W);

	/* Free the MTD device structure */
	kfree(h1910_nand_mtd);
}
module_exit(h1910_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
| gpl-2.0 |
ko/android-omap-tuna | drivers/media/common/saa7146_fops.c | 3047 | 14058 | #include <media/saa7146_vv.h>
/****************************************************************************/
/* resource management functions, shamelessly stolen from saa7134 driver */
/*
 * Try to claim the resource bits in @bit for file handle @fh.
 * Returns 1 on success (or if @fh already owns them), 0 if another
 * handle holds any of the bits.
 */
int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;

	if (fh->resources & bit) {
		DEB_D(("already allocated! want: 0x%02x, cur:0x%02x\n",bit,vv->resources));
		/* have it already allocated */
		return 1;
	}

	/* is it free? */
	if (vv->resources & bit) {
		DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
		/* no, someone else uses it */
		return 0;
	}
	/* it's free, grab it */
	fh->resources  |= bit;
	vv->resources |= bit;
	DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
	return 1;
}
/* Release resource @bits previously claimed via saa7146_res_get();
 * the caller must actually own every bit it releases. */
void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;

	BUG_ON((fh->resources & bits) != bits);

	fh->resources &= ~bits;
	vv->resources &= ~bits;
	DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
}
/********************************************************************************/
/* common dma functions */
/* Wait for @buf to finish, then unmap and free its DMA scatter list
 * and mark the buffer as needing re-initialization.  Must not be
 * called from interrupt context (it sleeps). */
void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q,
						struct saa7146_buf *buf)
{
	struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
	DEB_EE(("dev:%p, buf:%p\n",dev,buf));

	BUG_ON(in_interrupt());

	videobuf_waiton(q, &buf->vb, 0, 0);
	videobuf_dma_unmap(q->dev, dma);
	videobuf_dma_free(dma);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
/********************************************************************************/
/* common buffer functions */
/*
 * Queue @buf on DMA queue @q: start it immediately if the queue is
 * idle, otherwise append it.  Caller must hold dev->slock.
 */
int saa7146_buffer_queue(struct saa7146_dev *dev,
			 struct saa7146_dmaqueue *q,
			 struct saa7146_buf *buf)
{
	assert_spin_locked(&dev->slock);
	DEB_EE(("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf));

	BUG_ON(!q);

	if (NULL == q->curr) {
		q->curr = buf;
		DEB_D(("immediately activating buffer %p\n", buf));
		buf->activate(dev,buf,NULL);
	} else {
		list_add_tail(&buf->vb.queue,&q->queue);
		buf->vb.state = VIDEOBUF_QUEUED;
		DEB_D(("adding buffer %p to queue. (active buffer present)\n", buf));
	}
	return 0;
}
/*
 * Complete the currently active buffer on @q with the given videobuf
 * @state, timestamp it and wake waiters.  Caller must hold dev->slock.
 * (The NULL check after BUG_ON(!q->curr) is redundant but kept as a
 * belt-and-braces guard from the original.)
 */
void saa7146_buffer_finish(struct saa7146_dev *dev,
			   struct saa7146_dmaqueue *q,
			   int state)
{
	assert_spin_locked(&dev->slock);
	DEB_EE(("dev:%p, dmaq:%p, state:%d\n", dev, q, state));
	DEB_EE(("q->curr:%p\n",q->curr));
	BUG_ON(!q->curr);

	/* finish current buffer */
	if (NULL == q->curr) {
		DEB_D(("aiii. no current buffer\n"));
		return;
	}

	q->curr->vb.state = state;
	do_gettimeofday(&q->curr->vb.ts);
	wake_up(&q->curr->vb.done);

	q->curr = NULL;
}
/*
 * Activate the next queued buffer on @q, or stop the DMA engine if the
 * queue is empty (@vbi selects video-dma3 vs. video-dma1 shutdown).
 * Caller must hold dev->slock.
 */
void saa7146_buffer_next(struct saa7146_dev *dev,
			 struct saa7146_dmaqueue *q, int vbi)
{
	struct saa7146_buf *buf,*next = NULL;

	BUG_ON(!q);

	DEB_INT(("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi));

	assert_spin_locked(&dev->slock);
	if (!list_empty(&q->queue)) {
		/* activate next one from queue */
		buf = list_entry(q->queue.next,struct saa7146_buf,vb.queue);
		list_del(&buf->vb.queue);
		if (!list_empty(&q->queue))
			next = list_entry(q->queue.next,struct saa7146_buf, vb.queue);
		q->curr = buf;
		DEB_INT(("next buffer: buf:%p, prev:%p, next:%p\n", buf, q->queue.prev,q->queue.next));
		buf->activate(dev,buf,next);
	} else {
		DEB_INT(("no next buffer. stopping.\n"));
		if( 0 != vbi ) {
			/* turn off video-dma3 */
			saa7146_write(dev,MC1, MASK_20);
		} else {
			/* nothing to do -- just prevent next video-dma1 transfer
			   by lowering the protection address */

			// fixme: fix this for vflip != 0

			saa7146_write(dev, PROT_ADDR1, 0);
			saa7146_write(dev, MC2, (MASK_02|MASK_18));

			/* write the address of the rps-program */
			saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);
			/* turn on rps */
			saa7146_write(dev, MC1, (MASK_12 | MASK_28));

/*
			printk("vdma%d.base_even:     0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1));
			printk("vdma%d.base_odd:      0x%08x\n", 1,saa7146_read(dev,BASE_ODD1));
			printk("vdma%d.prot_addr:     0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1));
			printk("vdma%d.base_page:     0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1));
			printk("vdma%d.pitch:         0x%08x\n", 1,saa7146_read(dev,PITCH1));
			printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1));
*/
		}
		del_timer(&q->timeout);
	}
}
/*
 * DMA-queue watchdog: if a buffer is still active when the timer
 * fires, finish it with VIDEOBUF_ERROR.  The transfer is deliberately
 * not restarted here (see the original comment below).
 */
void saa7146_buffer_timeout(unsigned long data)
{
	struct saa7146_dmaqueue *q = (struct saa7146_dmaqueue*)data;
	struct saa7146_dev *dev = q->dev;
	unsigned long flags;

	DEB_EE(("dev:%p, dmaq:%p\n", dev, q));

	spin_lock_irqsave(&dev->slock,flags);
	if (q->curr) {
		DEB_D(("timeout on %p\n", q->curr));
		saa7146_buffer_finish(dev,q,VIDEOBUF_ERROR);
	}

	/* we don't restart the transfer here like other drivers do. when
	   a streaming capture is disabled, the timeout function will be
	   called for the current buffer. if we activate the next buffer now,
	   we mess up our capture logic. if a timeout occurs on another buffer,
	   then something is seriously broken before, so no need to buffer the
	   next capture IMHO... */
	/*
	saa7146_buffer_next(dev,q);
	*/
	spin_unlock_irqrestore(&dev->slock,flags);
}
/********************************************************************************/
/* file operations */
/*
 * Open handler shared by the video and vbi nodes: allocate the per-open
 * saa7146_fh, run the type-specific open hooks and pin the extension
 * module.  On any failure the fh is freed and private_data cleared.
 */
static int fops_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct saa7146_dev *dev = video_drvdata(file);
	struct saa7146_fh *fh = NULL;
	int result = 0;

	enum v4l2_buf_type type;

	DEB_EE(("file:%p, dev:%s\n", file, video_device_node_name(vdev)));

	if (mutex_lock_interruptible(&saa7146_devices_lock))
		return -ERESTARTSYS;

	DEB_D(("using: %p\n",dev));

	/* derive the buffer type from which device node was opened */
	type = vdev->vfl_type == VFL_TYPE_GRABBER
	     ? V4L2_BUF_TYPE_VIDEO_CAPTURE
	     : V4L2_BUF_TYPE_VBI_CAPTURE;

	/* check if an extension is registered */
	if( NULL == dev->ext ) {
		DEB_S(("no extension registered for this device.\n"));
		result = -ENODEV;
		goto out;
	}

	/* allocate per open data */
	fh = kzalloc(sizeof(*fh),GFP_KERNEL);
	if (NULL == fh) {
		DEB_S(("cannot allocate memory for per open data.\n"));
		result = -ENOMEM;
		goto out;
	}

	file->private_data = fh;
	fh->dev = dev;
	fh->type = type;

	if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
		DEB_S(("initializing vbi...\n"));
		if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
			result = saa7146_vbi_uops.open(dev,file);
		/* extension hook runs regardless of the core open result */
		if (dev->ext_vv_data->vbi_fops.open)
			dev->ext_vv_data->vbi_fops.open(file);
	} else {
		DEB_S(("initializing video...\n"));
		result = saa7146_video_uops.open(dev,file);
	}

	if (0 != result) {
		goto out;
	}

	if( 0 == try_module_get(dev->ext->module)) {
		result = -EINVAL;
		goto out;
	}

	result = 0;
out:
	if (fh && result != 0) {
		kfree(fh);
		file->private_data = NULL;
	}
	mutex_unlock(&saa7146_devices_lock);
	return result;
}
/* Release handler: run the type-specific release hooks, drop the
 * extension module reference and free the per-open data. */
static int fops_release(struct file *file)
{
	struct saa7146_fh  *fh = file->private_data;
	struct saa7146_dev *dev = fh->dev;

	DEB_EE(("file:%p\n", file));

	if (mutex_lock_interruptible(&saa7146_devices_lock))
		return -ERESTARTSYS;

	if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
		if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
			saa7146_vbi_uops.release(dev,file);
		if (dev->ext_vv_data->vbi_fops.release)
			dev->ext_vv_data->vbi_fops.release(file);
	} else {
		saa7146_video_uops.release(dev,file);
	}

	module_put(dev->ext->module);
	file->private_data = NULL;
	kfree(fh);

	mutex_unlock(&saa7146_devices_lock);

	return 0;
}
/* mmap handler: dispatch to the video or vbi videobuf queue of this
 * file handle. */
static int fops_mmap(struct file *file, struct vm_area_struct * vma)
{
	struct saa7146_fh *fh = file->private_data;
	struct videobuf_queue *q;

	switch (fh->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
		DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",file, vma));
		q = &fh->video_q;
		break;
		}
	case V4L2_BUF_TYPE_VBI_CAPTURE: {
		DEB_EE(("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n",file, vma));
		q = &fh->vbi_q;
		break;
		}
	default:
		BUG();
		return 0;
	}

	return videobuf_mmap_mapper(q,vma);
}
/*
 * poll handler: for a non-streaming vbi handle defer to videobuf's
 * stream poll; otherwise report readiness of the first buffer on the
 * relevant queue's stream list.
 */
static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
{
	struct saa7146_fh *fh = file->private_data;
	struct videobuf_buffer *buf = NULL;
	struct videobuf_queue *q;

	DEB_EE(("file:%p, poll:%p\n",file, wait));

	if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
		if( 0 == fh->vbi_q.streaming )
			return videobuf_poll_stream(file, &fh->vbi_q, wait);
		q = &fh->vbi_q;
	} else {
		DEB_D(("using video queue.\n"));
		q = &fh->video_q;
	}

	if (!list_empty(&q->stream))
		buf = list_entry(q->stream.next, struct videobuf_buffer, stream);

	if (!buf) {
		DEB_D(("buf == NULL!\n"));
		return POLLERR;
	}

	poll_wait(file, &buf->done, wait);
	if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) {
		DEB_D(("poll succeeded!\n"));
		return POLLIN|POLLRDNORM;
	}

	DEB_D(("nothing to poll for, buf->state:%d\n",buf->state));
	return 0;
}
/*
 * read() entry point: dispatch to the video or vbi read helper depending
 * on the handle's buffer type. VBI reads are refused when the hardware
 * lacks vbi capture support.
 */
static ssize_t fops_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
	struct saa7146_fh *fh = file->private_data;

	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return saa7146_video_uops.read(file,data,count,ppos);

	if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
		if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
			return saa7146_vbi_uops.read(file,data,count,ppos);
		return -EINVAL;
	}

	/* fops_open() only creates these two handle types */
	BUG();
	return 0;
}
/*
 * write() entry point: only vbi handles may be written to, and only when
 * the extension driver provides a vbi write hook.
 */
static ssize_t fops_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
{
	struct saa7146_fh *fh = file->private_data;

	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	if (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE) {
		/* fops_open() only creates these two handle types */
		BUG();
		return -EINVAL;
	}

	if (!fh->dev->ext_vv_data->vbi_fops.write)
		return -EINVAL;
	return fh->dev->ext_vv_data->vbi_fops.write(file, data, count, ppos);
}
/* file operations shared by the saa7146 video and vbi device nodes */
static const struct v4l2_file_operations video_fops =
{
	.owner		= THIS_MODULE,
	.open		= fops_open,
	.release	= fops_release,
	.read		= fops_read,
	.write		= fops_write,
	.poll		= fops_poll,
	.mmap		= fops_mmap,
	.unlocked_ioctl	= video_ioctl2,
};
/*
 * Interrupt dispatch for the video/vbi part: RPS0 (MASK_27) belongs to
 * video, RPS1 (MASK_28) to vbi. Called from the core irq handler with the
 * already-read interrupt status.
 */
static void vv_callback(struct saa7146_dev *dev, unsigned long status)
{
	u32 isr = status;

	DEB_INT(("dev:%p, isr:0x%08x\n",dev,(u32)status));

	if (0 != (isr & (MASK_27))) {
		DEB_INT(("irq: RPS0 (0x%08x).\n",isr));
		saa7146_video_uops.irq_done(dev,isr);
	}

	if (0 != (isr & (MASK_28))) {
		u32 mc2 = saa7146_read(dev, MC2);
		/* MC2/MASK_15 set means this RPS1 irq is the vbi workaround
		   trigger: just wake the waiter and re-arm, no frame done */
		if( 0 != (mc2 & MASK_15)) {
			DEB_INT(("irq: RPS1 vbi workaround (0x%08x).\n",isr));
			wake_up(&dev->vv_data->vbi_wq);
			saa7146_write(dev,MC2, MASK_31);
			return;
		}
		DEB_INT(("irq: RPS1 (0x%08x).\n",isr));
		saa7146_vbi_uops.irq_done(dev,isr);
	}
}
/*
 * Set up the video/vbi layer for one saa7146 device: register the v4l2
 * device, allocate the per-device vv state and the clipping DMA buffer,
 * program video defaults and initialize the video (and optionally vbi)
 * helper ops.
 *
 * Returns 0 on success or a negative errno. On failure nothing remains
 * registered or allocated.
 *
 * Fixes vs. previous version: both error paths after a successful
 * v4l2_device_register() now unregister it again (it used to leak), and
 * the DMA-allocation failure now returns -ENOMEM instead of -1.
 */
int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
{
	struct saa7146_vv *vv;
	int err;

	err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev);
	if (err)
		return err;

	vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
	if (vv == NULL) {
		ERR(("out of memory. aborting.\n"));
		v4l2_device_unregister(&dev->v4l2_dev);
		return -ENOMEM;
	}
	ext_vv->ops = saa7146_video_ioctl_ops;
	ext_vv->core_ops = &saa7146_video_ioctl_ops;

	DEB_EE(("dev:%p\n",dev));

	/* set default values for video parts of the saa7146 */
	saa7146_write(dev, BCS_CTRL, 0x80400040);

	/* enable video-port pins */
	saa7146_write(dev, MC1, (MASK_10 | MASK_26));

	/* save per-device extension data (one extension can
	   handle different devices that might need different
	   configuration data) */
	dev->ext_vv_data = ext_vv;

	vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle);
	if( NULL == vv->d_clipping.cpu_addr ) {
		ERR(("out of memory. aborting.\n"));
		kfree(vv);
		v4l2_device_unregister(&dev->v4l2_dev);
		return -ENOMEM;
	}
	memset(vv->d_clipping.cpu_addr, 0x0, SAA7146_CLIPPING_MEM);

	saa7146_video_uops.init(dev,vv);
	if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
		saa7146_vbi_uops.init(dev,vv);

	dev->vv_data = vv;
	dev->vv_callback = &vv_callback;

	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_vv_init);
/*
 * Undo saa7146_vv_init(): unregister the v4l2 device, free the clipping
 * DMA buffer and the per-device vv state. Always returns 0.
 */
int saa7146_vv_release(struct saa7146_dev* dev)
{
	struct saa7146_vv *vv = dev->vv_data;

	DEB_EE(("dev:%p\n",dev));

	v4l2_device_unregister(&dev->v4l2_dev);
	pci_free_consistent(dev->pci, SAA7146_CLIPPING_MEM, vv->d_clipping.cpu_addr, vv->d_clipping.dma_handle);
	kfree(vv);
	/* clear the pointers so the irq path can no longer reach freed state */
	dev->vv_data = NULL;
	dev->vv_callback = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_vv_release);
/*
 * Allocate and register one video_device node of the given @type for @dev,
 * returning it through @vid. The tvnorms mask is built from the extension's
 * standards table. Returns 0 on success or a negative errno; on failure the
 * allocated video_device is released and *vid is left untouched.
 */
int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
			    char *name, int type)
{
	struct video_device *vfd;
	int err;
	int i;

	DEB_EE(("dev:%p, name:'%s', type:%d\n",dev,name,type));

	// released by vfd->release
	vfd = video_device_alloc();
	if (vfd == NULL)
		return -ENOMEM;

	vfd->fops = &video_fops;
	vfd->ioctl_ops = &dev->ext_vv_data->ops;
	vfd->release = video_device_release;
	vfd->lock = &dev->v4l2_lock;
	vfd->tvnorms = 0;
	/* advertise exactly the standards the extension driver supports */
	for (i = 0; i < dev->ext_vv_data->num_stds; i++)
		vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
	strlcpy(vfd->name, name, sizeof(vfd->name));
	video_set_drvdata(vfd, dev);

	err = video_register_device(vfd, type, -1);
	if (err < 0) {
		ERR(("cannot register v4l2 device. skipping.\n"));
		video_device_release(vfd);
		return err;
	}

	INFO(("%s: registered device %s [v4l2]\n",
		dev->name, video_device_node_name(vfd)));

	*vid = vfd;
	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_register_device);
/*
 * Unregister a video_device previously registered with
 * saa7146_register_device() and clear the caller's pointer.
 * Always returns 0.
 */
int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev)
{
	struct video_device *vfd = *vid;

	DEB_EE(("dev:%p\n",dev));

	video_unregister_device(vfd);
	*vid = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_unregister_device);
/* Module init: nothing to do — devices are registered by extension drivers. */
static int __init saa7146_vv_init_module(void)
{
	return 0;
}

/* Module exit: intentionally empty, see init above. */
static void __exit saa7146_vv_cleanup_module(void)
{
}

module_init(saa7146_vv_init_module);
module_exit(saa7146_vv_cleanup_module);

MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_DESCRIPTION("video4linux driver for saa7146-based hardware");
MODULE_LICENSE("GPL");
| gpl-2.0 |
viaembedded/springboard-kernel-bsp | net/sctp/chunk.c | 5863 | 9887 | /* SCTP kernel implementation
* (C) Copyright IBM Corp. 2003, 2004
*
* This file is part of the SCTP kernel implementation
*
* This file contains the code relating the chunk abstraction.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* This file is mostly in anticipation of future work, but initially
* populate with fragment tracking for an outbound message.
*/
/* Reset a freshly allocated datamsg to its default state: one reference,
 * no failure/expiry bookkeeping, delayable, with an empty chunk list.
 */
static void sctp_datamsg_init(struct sctp_datamsg *msg)
{
	INIT_LIST_HEAD(&msg->chunks);
	atomic_set(&msg->refcnt, 1);
	msg->send_failed = 0;
	msg->send_error = 0;
	msg->can_abandon = 0;
	msg->can_delay = 1;
	msg->expires_at = 0;
}
/* Allocate a datamsg and initialize it; returns NULL on allocation failure. */
SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
{
	struct sctp_datamsg *msg = kmalloc(sizeof(*msg), gfp);

	if (!msg)
		return NULL;

	sctp_datamsg_init(msg);
	SCTP_DBG_OBJCNT_INC(datamsg);
	return msg;
}
/* Drop one chunk reference for every fragment of @msg, then drop the
 * caller's datamsg reference.
 */
void sctp_datamsg_free(struct sctp_datamsg *msg)
{
	struct sctp_chunk *chunk;

	/* This doesn't have to be a _safe variant because
	 * sctp_chunk_free() only drops the refs.
	 */
	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_chunk_free(chunk);

	sctp_datamsg_put(msg);
}
/* Final destruction of datamsg memory: runs when the last reference is
 * dropped. If the message failed to send and the socket subscribed to
 * SCTP_SEND_FAILED, emit one event per remaining chunk before releasing it.
 */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
	struct list_head *pos, *temp;
	struct sctp_chunk *chunk;
	struct sctp_sock *sp;
	struct sctp_ulpevent *ev;
	struct sctp_association *asoc = NULL;
	int error = 0, notify;

	/* If we failed, we may need to notify. -1 means "not decided yet":
	 * the subscription check is deferred until the first chunk gives us
	 * the association.
	 */
	notify = msg->send_failed ? -1 : 0;

	/* Release all references. */
	list_for_each_safe(pos, temp, &msg->chunks) {
		list_del_init(pos);
		chunk = list_entry(pos, struct sctp_chunk, frag_list);
		/* Check whether we _really_ need to notify. */
		if (notify < 0) {
			asoc = chunk->asoc;
			if (msg->send_error)
				error = msg->send_error;
			else
				error = asoc->outqueue.error;

			sp = sctp_sk(asoc->base.sk);
			notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED,
							    &sp->subscribe);
		}

		/* Generate a SEND FAILED event only if enabled. */
		if (notify > 0) {
			int sent;
			/* report whether this fragment already got a TSN */
			if (chunk->has_tsn)
				sent = SCTP_DATA_SENT;
			else
				sent = SCTP_DATA_UNSENT;

			ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
							    error, GFP_ATOMIC);
			if (ev)
				sctp_ulpq_tail_event(&asoc->ulpq, ev);
		}

		sctp_chunk_put(chunk);
	}

	SCTP_DBG_OBJCNT_DEC(datamsg);
	kfree(msg);
}
/* Hold a reference on the datamsg. */
static void sctp_datamsg_hold(struct sctp_datamsg *msg)
{
	atomic_inc(&msg->refcnt);
}
/* Release a reference; destroys the datamsg when the last one is dropped. */
void sctp_datamsg_put(struct sctp_datamsg *msg)
{
	if (atomic_dec_and_test(&msg->refcnt))
		sctp_datamsg_destroy(msg);
}
/* Assign a chunk to this datamsg; the chunk holds one datamsg reference. */
static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk)
{
	sctp_datamsg_hold(msg);
	chunk->msg = msg;
}
/* A data chunk can have a maximum payload of (2^16 - 20).  Break
 * down any such message into smaller chunks.  Opportunistically, fragment
 * the chunks down to the current MTU constraints.  We may get refragmented
 * later if the PMTU changes, but it is _much better_ to fragment immediately
 * with a reasonable guess than always doing our fragmentation on the
 * soft-interrupt.
 *
 * Returns a datamsg carrying the fragment chunks, or NULL on failure
 * (allocation failure or a fault while copying from user space).
 *
 * Fix vs. previous version: if sctp_user_addto_chunk() failed, the chunk
 * being filled had not yet been linked into msg->chunks, so the errout
 * cleanup loop never freed it — it leaked. The new errout_chunk_free label
 * releases that chunk before running the normal cleanup.
 */
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
					    struct sctp_sndrcvinfo *sinfo,
					    struct msghdr *msgh, int msg_len)
{
	int max, whole, i, offset, over, err;
	int len, first_len;
	int max_data;
	struct sctp_chunk *chunk;
	struct sctp_datamsg *msg;
	struct list_head *pos, *temp;
	__u8 frag;

	msg = sctp_datamsg_new(GFP_KERNEL);
	if (!msg)
		return NULL;

	/* Note: Calculate this outside of the loop, so that all fragments
	 * have the same expiration.
	 */
	if (sinfo->sinfo_timetolive) {
		/* sinfo_timetolive is in milliseconds */
		msg->expires_at = jiffies +
				    msecs_to_jiffies(sinfo->sinfo_timetolive);
		msg->can_abandon = 1;
		SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n",
				  __func__, msg, msg->expires_at, jiffies);
	}

	/* This is the biggest possible DATA chunk that can fit into
	 * the packet
	 */
	max_data = asoc->pathmtu -
		sctp_sk(asoc->base.sk)->pf->af->net_header_len -
		sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);

	max = asoc->frag_point;

	/* If the peer requested that we authenticate DATA chunks
	 * we need to account for bundling of the AUTH chunks along with
	 * DATA.
	 */
	if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
		struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);

		if (hmac_desc)
			max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
					    hmac_desc->hmac_len);
	}

	/* Now, check if we need to reduce our max */
	if (max > max_data)
		max = max_data;

	whole = 0;
	first_len = max;

	/* Check to see if we have a pending SACK and try to let it be bundled
	 * with this message.  Do this if we don't have any data queued already.
	 * To check that, look at out_qlen and retransmit list.
	 * NOTE: we will not reduce to account for SACK, if the message would
	 * not have been fragmented.
	 */
	if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
	    asoc->outqueue.out_qlen == 0 &&
	    list_empty(&asoc->outqueue.retransmit) &&
	    msg_len > max)
		max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t));

	/* Encourage Cookie-ECHO bundling. */
	if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
		max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;

	/* Now that we adjusted completely, reset first_len */
	if (first_len > max_data)
		first_len = max_data;

	/* Account for a different sized first fragment */
	if (msg_len >= first_len) {
		msg_len -= first_len;
		whole = 1;
		msg->can_delay = 0;
	}

	/* How many full sized?  How many bytes leftover? */
	whole += msg_len / max;
	over = msg_len % max;
	offset = 0;

	if ((whole > 1) || (whole && over))
		SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);

	/* Create chunks for all the full sized DATA chunks. */
	for (i=0, len=first_len; i < whole; i++) {
		frag = SCTP_DATA_MIDDLE_FRAG;

		if (0 == i)
			frag |= SCTP_DATA_FIRST_FRAG;

		if ((i == (whole - 1)) && !over) {
			frag |= SCTP_DATA_LAST_FRAG;

			/* The application requests to set the I-bit of the
			 * last DATA chunk of a user message when providing
			 * the user message to the SCTP implementation.
			 */
			if ((sinfo->sinfo_flags & SCTP_EOF) ||
			    (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
				frag |= SCTP_DATA_SACK_IMM;
		}

		chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);

		if (!chunk)
			goto errout;

		err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
		if (err < 0)
			goto errout_chunk_free;

		offset += len;

		/* Put the chunk->skb back into the form expected by send.  */
		__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
			   - (__u8 *)chunk->skb->data);

		sctp_datamsg_assign(msg, chunk);
		list_add_tail(&chunk->frag_list, &msg->chunks);

		/* The first chunk, the first chunk was likely short
		 * to allow bundling, so reset to full size.
		 */
		if (0 == i)
			len = max;
	}

	/* .. now the leftover bytes. */
	if (over) {
		if (!whole)
			frag = SCTP_DATA_NOT_FRAG;
		else
			frag = SCTP_DATA_LAST_FRAG;

		if ((sinfo->sinfo_flags & SCTP_EOF) ||
		    (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
			frag |= SCTP_DATA_SACK_IMM;

		chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);

		if (!chunk)
			goto errout;

		err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);

		/* Put the chunk->skb back into the form expected by send.  */
		__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
			   - (__u8 *)chunk->skb->data);
		if (err < 0)
			goto errout_chunk_free;

		sctp_datamsg_assign(msg, chunk);
		list_add_tail(&chunk->frag_list, &msg->chunks);
	}

	return msg;

errout_chunk_free:
	/* This chunk was never linked into msg->chunks, so the loop below
	 * would not free it; release it here first.
	 */
	sctp_chunk_free(chunk);

errout:
	list_for_each_safe(pos, temp, &msg->chunks) {
		list_del_init(pos);
		chunk = list_entry(pos, struct sctp_chunk, frag_list);
		sctp_chunk_free(chunk);
	}
	sctp_datamsg_put(msg);
	return NULL;
}
/* Check whether this chunk's message has expired; returns 1 if the message
 * is abandonable and its lifetime has passed, 0 otherwise.
 */
int sctp_chunk_abandoned(struct sctp_chunk *chunk)
{
	struct sctp_datamsg *msg = chunk->msg;

	return msg->can_abandon && time_after(jiffies, msg->expires_at);
}
/* This chunk (and consequently entire message) has failed in its sending.
 * Record the error on the owning datamsg so destruction can notify.
 */
void sctp_chunk_fail(struct sctp_chunk *chunk, int error)
{
	chunk->msg->send_failed = 1;
	chunk->msg->send_error = error;
}
| gpl-2.0 |
cr1exe/android_kernel_sony_taoshan | drivers/misc/sgi-xp/xpnet.c | 7911 | 17928 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2009 Silicon Graphics, Inc. All rights reserved.
*/
/*
* Cross Partition Network Interface (XPNET) support
*
* XPNET provides a virtual network layered on top of the Cross
* Partition communication layer.
*
* XPNET provides direct point-to-point and broadcast-like support
* for an ethernet-like device. The ethernet broadcast medium is
* replaced with a point-to-point message structure which passes
* pointers to a DMA-capable block that a remote partition should
* retrieve and pass to the upper level networking layer.
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "xp.h"
/*
* The message payload transferred by XPC.
*
* buf_pa is the physical address where the DMA should pull from.
*
* NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
* cacheline boundary. To accomplish this, we record the number of
* bytes from the beginning of the first cacheline to the first useful
* byte of the skb (leadin_ignore) and the number of bytes from the
* last useful byte of the skb to the end of the last cacheline
* (tailout_ignore).
*
* size is the number of bytes to transfer which includes the skb->len
* (useful bytes of the senders skb) plus the leadin and tailout
*/
/* On-the-wire descriptor passed through XPC; layout notes in the comment
 * block above (buf_pa must start on a cacheline boundary).
 */
struct xpnet_message {
	u16 version;		/* Version for this message */
	u16 embedded_bytes;	/* #of bytes embedded in XPC message */
	u32 magic;		/* Special number indicating this is xpnet */
	unsigned long buf_pa;	/* phys address of buffer to retrieve */
	u32 size;		/* #of bytes in buffer */
	u8 leadin_ignore;	/* #of bytes to ignore at the beginning */
	u8 tailout_ignore;	/* #of bytes to ignore at the end */
	unsigned char data;	/* body of small packets */
};
/*
* Determine the size of our message, the cacheline aligned size,
* and then the number of message will request from XPC.
*
* XPC expects each message to exist in an individual cacheline.
*/
#define XPNET_MSG_SIZE XPC_MSG_PAYLOAD_MAX_SIZE
#define XPNET_MSG_DATA_MAX \
(XPNET_MSG_SIZE - offsetof(struct xpnet_message, data))
#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE)
#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
/*
* Version number of XPNET implementation. XPNET can always talk to versions
* with same major #, and never talk to versions with a different version.
*/
#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
#define XPNET_VERSION _XPNET_VERSION(1, 0) /* version 1.0 */
#define XPNET_VERSION_EMBED _XPNET_VERSION(1, 1) /* version 1.1 */
#define XPNET_MAGIC 0x88786984 /* "XNET" */
#define XPNET_VALID_MSG(_m) \
((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
&& (msg->magic == XPNET_MAGIC))
#define XPNET_DEVICE_NAME "xp0"
/*
* When messages are queued with xpc_send_notify, a kmalloc'd buffer
* of the following type is passed as a notification cookie. When the
* notification function is called, we use the cookie to decide
* whether all outstanding message sends have completed. The skb can
* then be released.
*/
/* Notification cookie: one per transmitted skb; use_count tracks the
 * outstanding xpc_send_notify()s still referencing the skb (see above).
 */
struct xpnet_pending_msg {
	struct sk_buff *skb;
	atomic_t use_count;
};
struct net_device *xpnet_device;
/*
* When we are notified of other partitions activating, we add them to
* our bitmask of partitions to which we broadcast.
*/
static unsigned long *xpnet_broadcast_partitions;
/* protect above */
static DEFINE_SPINLOCK(xpnet_broadcast_lock);
/*
* Since the Block Transfer Engine (BTE) is being used for the transfer
* and it relies upon cache-line size transfers, we need to reserve at
* least one cache-line for head and tail alignment. The BTE is
* limited to 8MB transfers.
*
* Testing has shown that changing MTU to greater than 64KB has no effect
* on TCP as the two sides negotiate a Max Segment Size that is limited
 * to 64K. Other protocols may use packets greater than this, but for
* now, the default is 64KB.
*/
#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
/* 32KB has been determined to be the ideal */
#define XPNET_DEF_MTU (0x8000UL)
/*
* The partid is encapsulated in the MAC address beginning in the following
* octet and it consists of two octets.
*/
#define XPNET_PARTID_OCTET 2
/* Define the XPNET debug device structures to be used with dev_dbg() et al.
 * These are dummy device objects; they exist only so the dev_*() logging
 * helpers have something to print a driver name from.
 */
struct device_driver xpnet_dbg_name = {
	.name = "xpnet"
};

struct device xpnet_dbg_subname = {
	.init_name = "",	/* set to "" */
	.driver = &xpnet_dbg_name
};

struct device *xpnet = &xpnet_dbg_subname;
/*
 * Packet was received by XPC and forwarded to us: validate the descriptor,
 * allocate an skb, pull the payload over (either embedded in the message
 * or via a remote memcpy of the sender's buffer) and hand it to the
 * network stack. Always acknowledges the XPC message before returning.
 */
static void
xpnet_receive(short partid, int channel, struct xpnet_message *msg)
{
	struct sk_buff *skb;
	void *dst;
	enum xp_retval ret;

	if (!XPNET_VALID_MSG(msg)) {
		/*
		 * Packet with a different XPC version.  Ignore.
		 */
		xpc_received(partid, channel, (void *)msg);

		xpnet_device->stats.rx_errors++;

		return;
	}
	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
		msg->leadin_ignore, msg->tailout_ignore);

	/* reserve an extra cache line */
	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
	if (!skb) {
		dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
			msg->size + L1_CACHE_BYTES);

		xpc_received(partid, channel, (void *)msg);

		xpnet_device->stats.rx_errors++;

		return;
	}

	/*
	 * The allocated skb has some reserved space.
	 * In order to use xp_remote_memcpy(), we need to get the
	 * skb->data pointer moved forward.
	 */
	skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
					    (L1_CACHE_BYTES - 1)) +
			  msg->leadin_ignore));

	/*
	 * Update the tail pointer to indicate data actually
	 * transferred.
	 */
	skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));

	/*
	 * Move the data over from the other side.
	 */
	if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
	    (msg->embedded_bytes != 0)) {
		/* small packet: payload rides inside the XPC message itself */
		dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
			"%lu)\n", skb->data, &msg->data,
			(size_t)msg->embedded_bytes);

		skb_copy_to_linear_data(skb, &msg->data,
					(size_t)msg->embedded_bytes);
	} else {
		/* large packet: DMA the sender's cacheline-aligned buffer */
		dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
			"xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
					  (void *)msg->buf_pa, msg->size);

		ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
		if (ret != xpSuccess) {
			/*
			 * !!! Need better way of cleaning skb.  Currently skb
			 * !!! appears in_use and we can't just call
			 * !!! dev_kfree_skb.
			 */
			dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
				"returned error=0x%x\n", dst,
				(void *)msg->buf_pa, msg->size, ret);

			xpc_received(partid, channel, (void *)msg);

			xpnet_device->stats.rx_errors++;

			return;
		}
	}

	dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len);

	skb->protocol = eth_type_trans(skb, xpnet_device);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(xpnet, "passing skb to network layer\n"
		"\tskb->head=0x%p skb->data=0x%p skb->tail=0x%p "
		"skb->end=0x%p skb->len=%d\n",
		(void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
		skb_end_pointer(skb), skb->len);

	xpnet_device->stats.rx_packets++;
	xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;

	netif_rx_ni(skb);
	xpc_received(partid, channel, (void *)msg);
}
/*
 * This is the handler which XPC calls during any sort of change in
 * state or message reception on a connection. Maintains the broadcast
 * partition bitmap and the carrier state accordingly.
 */
static void
xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
			  void *data, void *key)
{
	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(channel != XPC_NET_CHANNEL);

	switch (reason) {
	case xpMsgReceived:	/* message received */
		DBUG_ON(data == NULL);

		xpnet_receive(partid, channel, (struct xpnet_message *)data);
		break;

	case xpConnected:	/* connection completed to a partition */
		spin_lock_bh(&xpnet_broadcast_lock);
		__set_bit(partid, xpnet_broadcast_partitions);
		spin_unlock_bh(&xpnet_broadcast_lock);

		netif_carrier_on(xpnet_device);

		dev_dbg(xpnet, "%s connected to partition %d\n",
			xpnet_device->name, partid);
		break;

	default:
		/* any other reason means the partition went away */
		spin_lock_bh(&xpnet_broadcast_lock);
		__clear_bit(partid, xpnet_broadcast_partitions);
		spin_unlock_bh(&xpnet_broadcast_lock);

		/* drop the carrier once no partitions remain connected */
		if (bitmap_empty((unsigned long *)xpnet_broadcast_partitions,
				 xp_max_npartitions)) {
			netif_carrier_off(xpnet_device);
		}

		dev_dbg(xpnet, "%s disconnected from partition %d\n",
			xpnet_device->name, partid);
		break;
	}
}
/*
 * ndo_open: establish the XPC network channel; connection state changes and
 * incoming messages will be delivered to xpnet_connection_activity().
 */
static int
xpnet_dev_open(struct net_device *dev)
{
	enum xp_retval ret;

	dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
		"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
		(unsigned long)XPNET_MSG_SIZE,
		(unsigned long)XPNET_MSG_NENTRIES,
		(unsigned long)XPNET_MAX_KTHREADS,
		(unsigned long)XPNET_MAX_IDLE_KTHREADS);

	ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
			  XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
			  XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
	if (ret != xpSuccess) {
		dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
			"ret=%d\n", dev->name, ret);

		return -ENOMEM;
	}

	dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);

	return 0;
}
/* ndo_stop: tear down the XPC network channel opened in xpnet_dev_open(). */
static int
xpnet_dev_stop(struct net_device *dev)
{
	xpc_disconnect(XPC_NET_CHANNEL);

	dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);

	return 0;
}
static int
xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
{
/* 68 comes from min TCP+IP+MAC header */
if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
"between 68 and %ld\n", dev->name, new_mtu,
XPNET_MAX_MTU);
return -EINVAL;
}
dev->mtu = new_mtu;
dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
return 0;
}
/*
 * Notification that the other end has received the message and
 * DMA'd the skb information.  At this point, they are done with
 * our side.  When all recipients are done processing, we
 * release the skb and then release our pending message structure.
 */
static void
xpnet_send_completed(enum xp_retval reason, short partid, int channel,
		     void *__qm)
{
	struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;

	DBUG_ON(queued_msg == NULL);

	dev_dbg(xpnet, "message to %d notified with reason %d\n",
		partid, reason);

	/* last outstanding notify: safe to free the skb and the cookie */
	if (atomic_dec_return(&queued_msg->use_count) == 0) {
		dev_dbg(xpnet, "all acks for skb->head=-x%p\n",
			(void *)queued_msg->skb->head);

		dev_kfree_skb_any(queued_msg->skb);
		kfree(queued_msg);
	}
}
/*
 * Build an xpnet_message describing @skb's DMA region (or carrying the
 * payload inline when embedded_bytes != 0) and queue it to @dest_partid via
 * XPC. Bumps queued_msg->use_count for the in-flight notify; rolls it back
 * if the send is refused.
 */
static void
xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
	   u64 start_addr, u64 end_addr, u16 embedded_bytes, int dest_partid)
{
	u8 msg_buffer[XPNET_MSG_SIZE];
	struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer;
	u16 msg_size = sizeof(struct xpnet_message);
	enum xp_retval ret;

	msg->embedded_bytes = embedded_bytes;
	if (unlikely(embedded_bytes != 0)) {
		msg->version = XPNET_VERSION_EMBED;
		dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
			&msg->data, skb->data, (size_t)embedded_bytes);
		skb_copy_from_linear_data(skb, &msg->data,
					  (size_t)embedded_bytes);
		/* -1: msg->data already accounts for the first payload byte */
		msg_size += embedded_bytes - 1;
	} else {
		msg->version = XPNET_VERSION;
	}
	msg->magic = XPNET_MAGIC;
	msg->size = end_addr - start_addr;
	/* cacheline padding the receiver must strip, front and back */
	msg->leadin_ignore = (u64)skb->data - start_addr;
	msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
	msg->buf_pa = xp_pa((void *)start_addr);

	dev_dbg(xpnet, "sending XPC message to %d:%d\n"
		"msg->buf_pa=0x%lx, msg->size=%u, "
		"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
		dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
		msg->leadin_ignore, msg->tailout_ignore);

	atomic_inc(&queued_msg->use_count);

	ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg,
			      msg_size, xpnet_send_completed, queued_msg);
	if (unlikely(ret != xpSuccess))
		atomic_dec(&queued_msg->use_count);
}
/*
 * Network layer has formatted a packet (skb) and is ready to place it
 * "on the wire".  Prepare and send an xpnet_message to all partitions
 * which have connected with us and are targets of this packet.
 *
 * MAC-NOTE:  For the XPNET driver, the MAC address contains the
 * destination partid.  If the destination partid octets are 0xffff,
 * this packet is to be broadcast to all connected partitions.
 */
static int
xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xpnet_pending_msg *queued_msg;
	u64 start_addr, end_addr;
	short dest_partid;
	u16 embedded_bytes = 0;

	dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
		"skb->end=0x%p skb->len=%d\n", (void *)skb->head,
		(void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len);

	/* first MAC octet 0x33 marks IPv6 multicast; not supported, drop */
	if (skb->data[0] == 0x33) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;	/* nothing needed to be done */
	}

	/*
	 * The xpnet_pending_msg tracks how many outstanding
	 * xpc_send_notifies are relying on this skb.  When none
	 * remain, release the skb.
	 */
	queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
	if (queued_msg == NULL) {
		dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
			 "packet\n", sizeof(struct xpnet_pending_msg));

		dev->stats.tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* get the beginning of the first cacheline and end of last */
	start_addr = ((u64)skb->data & ~(L1_CACHE_BYTES - 1));
	end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb));

	/* calculate how many bytes to embed in the XPC message */
	if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
		/* skb->data does fit so embed */
		embedded_bytes = skb->len;
	}

	/*
	 * Since the send occurs asynchronously, we set the count to one
	 * and begin sending.  Any sends that happen to complete before
	 * we are done sending will not free the skb.  We will be left
	 * with that task during exit.  This also handles the case of
	 * a packet destined for a partition which is no longer up.
	 */
	atomic_set(&queued_msg->use_count, 1);
	queued_msg->skb = skb;

	/* first MAC octet 0xff: broadcast to every connected partition */
	if (skb->data[0] == 0xff) {
		/* we are being asked to broadcast to all partitions */
		for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
			     xp_max_npartitions) {

			xpnet_send(skb, queued_msg, start_addr, end_addr,
				   embedded_bytes, dest_partid);
		}
	} else {
		/* unicast: partid is encoded big-endian in the MAC address */
		dest_partid = (short)skb->data[XPNET_PARTID_OCTET + 1];
		dest_partid |= (short)skb->data[XPNET_PARTID_OCTET + 0] << 8;

		if (dest_partid >= 0 &&
		    dest_partid < xp_max_npartitions &&
		    test_bit(dest_partid, xpnet_broadcast_partitions) != 0) {

			xpnet_send(skb, queued_msg, start_addr, end_addr,
				   embedded_bytes, dest_partid);
		}
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* drop our initial reference; frees immediately if no sends queued */
	if (atomic_dec_return(&queued_msg->use_count) == 0) {
		dev_kfree_skb(skb);
		kfree(queued_msg);
	}

	return NETDEV_TX_OK;
}
/*
 * Deal with transmit timeouts coming from the network layer: just count
 * the error; XPC handles retransmission/teardown itself.
 */
static void
xpnet_dev_tx_timeout(struct net_device *dev)
{
	dev->stats.tx_errors++;
}
/* net_device operations for the xp0 virtual interface */
static const struct net_device_ops xpnet_netdev_ops = {
	.ndo_open		= xpnet_dev_open,
	.ndo_stop		= xpnet_dev_stop,
	.ndo_start_xmit		= xpnet_dev_hard_start_xmit,
	.ndo_change_mtu		= xpnet_dev_change_mtu,
	.ndo_tx_timeout		= xpnet_dev_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/*
 * Module load: allocate the broadcast partition bitmap, create and register
 * the xp0 net device with an ethernet-like setup but an XPNET-specific MAC
 * (which encodes the local partition id). Returns 0 or a negative errno.
 */
static int __init
xpnet_init(void)
{
	int result;

	/* only meaningful on SGI shub/UV hardware */
	if (!is_shub() && !is_uv())
		return -ENODEV;

	dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);

	xpnet_broadcast_partitions = kzalloc(BITS_TO_LONGS(xp_max_npartitions) *
					     sizeof(long), GFP_KERNEL);
	if (xpnet_broadcast_partitions == NULL)
		return -ENOMEM;

	/*
	 * use ether_setup() to init the majority of our device
	 * structure and then override the necessary pieces.
	 */
	xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, ether_setup);
	if (xpnet_device == NULL) {
		kfree(xpnet_broadcast_partitions);
		return -ENOMEM;
	}

	netif_carrier_off(xpnet_device);

	xpnet_device->netdev_ops = &xpnet_netdev_ops;
	xpnet_device->mtu = XPNET_DEF_MTU;

	/*
	 * Multicast assumes the LSB of the first octet is set for multicast
	 * MAC addresses.  We chose the first octet of the MAC to be unlikely
	 * to collide with any vendor's officially issued MAC.
	 */
	xpnet_device->dev_addr[0] = 0x02;     /* locally administered, no OUI */

	xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
	xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);

	/*
	 * ether_setup() sets this to a multicast device.  We are
	 * really not supporting multicast at this time.
	 */
	xpnet_device->flags &= ~IFF_MULTICAST;

	/*
	 * No need to checksum as it is a DMA transfer.  The BTE will
	 * report an error if the data is not retrievable and the
	 * packet will be dropped.
	 */
	xpnet_device->features = NETIF_F_HW_CSUM;

	result = register_netdev(xpnet_device);
	if (result != 0) {
		free_netdev(xpnet_device);
		kfree(xpnet_broadcast_partitions);
	}

	return result;
}

module_init(xpnet_init);
/*
 * Module unload: unregister and free the xp0 device and release the
 * broadcast partition bitmap allocated in xpnet_init().
 */
static void __exit
xpnet_exit(void)
{
	/* was xpnet_device[0].name — same object, but the array-index
	 * spelling obscured that xpnet_device is a plain pointer */
	dev_info(xpnet, "unregistering network device %s\n",
		 xpnet_device->name);

	unregister_netdev(xpnet_device);
	free_netdev(xpnet_device);
	kfree(xpnet_broadcast_partitions);
}

module_exit(xpnet_exit);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ResurrectionRemix-Devices/android_kernel_lge_hammerhead | drivers/net/wireless/hostap/hostap_pci.c | 8935 | 11237 | #define PRISM2_PCI
/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
* driver patches from Reyk Floeter <reyk@vantronix.net> and
* Andy Warner <andyw@pobox.com> */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/io.h>
#include "hostap_wlan.h"
static char *dev_info = "hostap_pci";
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
"PCI cards.");
MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
MODULE_LICENSE("GPL");
/* struct local_info::hw_priv */
struct hostap_pci_priv {
void __iomem *mem_start;
};
/* FIX: do we need mb/wmb/rmb with memory operations? */
static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
{ 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
/* Samsung MagicLAN SWL-2210P */
{ 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
#ifdef PRISM2_IO_DEBUG
/* Byte write to the mapped PCI register window at offset @a, recorded in
 * the I/O debug trace.  The local->lock spinlock is held across both the
 * trace entry and the MMIO write so the trace order matches bus order. */
static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
writeb(v, hw_priv->mem_start + a);
spin_unlock_irqrestore(&local->lock, flags);
}
/* Byte read from the mapped PCI register window at offset @a, recorded in
 * the I/O debug trace under local->lock (read first, then trace, so the
 * logged value is the one actually returned). */
static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u8 v;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readb(hw_priv->mem_start + a);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
spin_unlock_irqrestore(&local->lock, flags);
return v;
}
/* 16-bit write to the mapped PCI register window at offset @a, recorded
 * in the I/O debug trace under local->lock. */
static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
writew(v, hw_priv->mem_start + a);
spin_unlock_irqrestore(&local->lock, flags);
}
/* 16-bit read from the mapped PCI register window at offset @a, recorded
 * in the I/O debug trace under local->lock. */
static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u16 v;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readw(hw_priv->mem_start + a);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
spin_unlock_irqrestore(&local->lock, flags);
return v;
}
#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a)))
#else /* PRISM2_IO_DEBUG */
/* Write one byte to the Prism2.5 register window at offset a. */
static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	writeb(v, hw_priv->mem_start + a);
}
/* Read one byte from the Prism2.5 register window at offset a. */
static inline u8 hfa384x_inb(struct net_device *dev, int a)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	return readb(hw_priv->mem_start + a);
}
/* Write a 16-bit value to the Prism2.5 register window at offset a. */
static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	writew(v, hw_priv->mem_start + a);
}
/* Read a 16-bit value from the Prism2.5 register window at offset a. */
static inline u16 hfa384x_inw(struct net_device *dev, int a)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	return readw(hw_priv->mem_start + a);
}
#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a)))
#endif /* PRISM2_IO_DEBUG */
/* Copy @len bytes from the selected Buffer Access Path data register into
 * @buf.  Data is streamed 16 bits at a time in card (little-endian) byte
 * order; a trailing odd byte is fetched with a single byte access. */
static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
int len)
{
u16 d_off;
__le16 *pos;
/* BAP1 and BAP0 have separate data register offsets. */
d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
pos = (__le16 *) buf;
for ( ; len > 1; len -= 2)
*pos++ = HFA384X_INW_DATA(d_off);
if (len & 1)
*((char *) pos) = HFA384X_INB(d_off);
return 0;
}
/* Copy @len bytes from @buf to the selected Buffer Access Path data
 * register, 16 bits at a time; a trailing odd byte is written with a
 * single byte access. */
static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
{
u16 d_off;
__le16 *pos;
d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
pos = (__le16 *) buf;
for ( ; len > 1; len -= 2)
HFA384X_OUTW_DATA(*pos++, d_off);
if (len & 1)
HFA384X_OUTB(*((char *) pos), d_off);
return 0;
}
/* FIX: This might change at some point.. */
#include "hostap_hw.c"
/* Soft-reset the card via the PCI COR register: pulse the sreset bit
 * (0x0080) high then low, then wait for firmware init to finish
 * (CMD:BUSY clearing).  Short delays are used by default; see comment
 * below for the long-delay variant. */
static void prism2_pci_cor_sreset(local_info_t *local)
{
struct net_device *dev = local->dev;
u16 reg;
reg = HFA384X_INB(HFA384X_PCICOR_OFF);
printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg);
/* linux-wlan-ng uses extremely long hold and settle times for
 * COR sreset. A comment in the driver code mentions that the long
 * delays appear to be necessary. However, at least IBM 22P6901 seems
 * to work fine with shorter delays.
 *
 * Longer delays can be configured by uncommenting following line: */
/* #define PRISM2_PCI_USE_LONG_DELAYS */
#ifdef PRISM2_PCI_USE_LONG_DELAYS
int i;
HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
mdelay(250);
HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
mdelay(500);
/* Wait for f/w to complete initialization (CMD:BUSY == 0) */
i = 2000000 / 10;
while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
udelay(10);
#else /* PRISM2_PCI_USE_LONG_DELAYS */
HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
mdelay(2);
HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
mdelay(2);
#endif /* PRISM2_PCI_USE_LONG_DELAYS */
/* Still busy after the delays: reset did not complete in time. */
if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
}
}
/* "Genesis mode" reset: assert COR sreset (0x00C5 = 0x0045 | 0x0080),
 * program the Hardware Control Register with @hcr, then deassert sreset
 * (0x0045), with 10 ms settle time between steps.  Exact meaning of the
 * 0x45 base bits is card-specific — NOTE(review): not derivable from
 * this file. */
static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
{
struct net_device *dev = local->dev;
HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
mdelay(10);
HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
mdelay(10);
HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
mdelay(10);
}
static struct prism2_helper_functions prism2_pci_funcs =
{
.card_present = NULL,
.cor_sreset = prism2_pci_cor_sreset,
.genesis_reset = prism2_pci_genesis_reset,
.hw_type = HOSTAP_HW_PCI,
};
/* PCI probe: enable the device, reserve and map BAR 0, allocate the
 * hostap local data, soft-reset the chip, hook the shared interrupt and
 * configure the firmware.  Returns the result of hostap_hw_ready() on
 * success, -ENOMEM or -ENODEV on failure. */
static int prism2_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
unsigned long phymem;
void __iomem *mem = NULL;
local_info_t *local = NULL;
struct net_device *dev = NULL;
static int cards_found /* = 0 */;
int irq_registered = 0;
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
if (hw_priv == NULL)
return -ENOMEM;
if (pci_enable_device(pdev))
goto err_out_free;
phymem = pci_resource_start(pdev, 0);
if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
goto err_out_disable;
}
mem = pci_ioremap_bar(pdev, 0);
if (mem == NULL) {
printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ;
goto fail;
}
/* cards_found is a static per-driver counter used as the card index. */
dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
&pdev->dev);
if (dev == NULL)
goto fail;
iface = netdev_priv(dev);
local = iface->local;
local->hw_priv = hw_priv;
cards_found++;
dev->irq = pdev->irq;
hw_priv->mem_start = mem;
dev->base_addr = (unsigned long) mem;
/* Reset the chip before requesting the IRQ so no stale interrupt is
 * pending when the handler is installed. */
prism2_pci_cor_sreset(local);
pci_set_drvdata(pdev, dev);
if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name,
dev)) {
printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
goto fail;
} else
irq_registered = 1;
if (!local->pri_only && prism2_hw_config(dev, 1)) {
printk(KERN_DEBUG "%s: hardware initialization failed\n",
dev_info);
goto fail;
}
printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
"mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);
return hostap_hw_ready(dev);
/* 'fail' is only reachable once the memory region is reserved, so the
 * unconditional release_mem_region() below is safe.  Unwind is in
 * reverse order of acquisition; prism2_free_local_data() is presumably
 * NULL-safe since dev may still be NULL here — TODO confirm. */
fail:
if (irq_registered && dev)
free_irq(dev->irq, dev);
if (mem)
iounmap(mem);
release_mem_region(phymem, pci_resource_len(pdev, 0));
err_out_disable:
pci_disable_device(pdev);
prism2_free_local_data(dev);
err_out_free:
kfree(hw_priv);
return -ENODEV;
}
/* PCI remove: quiesce the hardware, then release IRQ, local data, the
 * MMIO mapping and the PCI resources in reverse order of probe. */
static void prism2_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev;
struct hostap_interface *iface;
void __iomem *mem_start;
struct hostap_pci_priv *hw_priv;
dev = pci_get_drvdata(pdev);
iface = netdev_priv(dev);
hw_priv = iface->local->hw_priv;
/* Reset the hardware, and ensure interrupts are disabled. */
prism2_pci_cor_sreset(iface->local);
hfa384x_disable_interrupts(dev);
if (dev->irq)
free_irq(dev->irq, dev);
/* Save the mapping before prism2_free_local_data() frees the
 * structures it lives in. */
mem_start = hw_priv->mem_start;
prism2_free_local_data(dev);
kfree(hw_priv);
iounmap(mem_start);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
pci_disable_device(pdev);
}
#ifdef CONFIG_PM
/* Legacy PM suspend: stop traffic, shut the firmware down, then put the
 * PCI device into D3hot with its config space saved. */
static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (netif_running(dev)) {
netif_stop_queue(dev);
netif_device_detach(dev);
}
prism2_suspend(dev);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
/* Legacy PM resume: re-enable the PCI device, restore config space,
 * reinitialize the firmware and restart traffic if the interface was
 * running. */
static int prism2_pci_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
int err;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
dev->name);
return err;
}
pci_restore_state(pdev);
/* Second argument 0: resume-style hw config (vs 1 at probe time). */
prism2_hw_config(dev, 0);
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
}
return 0;
}
#endif /* CONFIG_PM */
MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
static struct pci_driver prism2_pci_driver = {
.name = "hostap_pci",
.id_table = prism2_pci_id_table,
.probe = prism2_pci_probe,
.remove = prism2_pci_remove,
#ifdef CONFIG_PM
.suspend = prism2_pci_suspend,
.resume = prism2_pci_resume,
#endif /* CONFIG_PM */
};
/* Module init: register the PCI driver; probing happens per device. */
static int __init init_prism2_pci(void)
{
return pci_register_driver(&prism2_pci_driver);
}
/* Module exit: unregister the PCI driver (invokes remove per device). */
static void __exit exit_prism2_pci(void)
{
pci_unregister_driver(&prism2_pci_driver);
}
module_init(init_prism2_pci);
module_exit(exit_prism2_pci);
| gpl-2.0 |
samno1607/Jekyll-Hyde | drivers/net/wireless/hostap/hostap_pci.c | 8935 | 11237 | #define PRISM2_PCI
/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
* driver patches from Reyk Floeter <reyk@vantronix.net> and
* Andy Warner <andyw@pobox.com> */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/io.h>
#include "hostap_wlan.h"
static char *dev_info = "hostap_pci";
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
"PCI cards.");
MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
MODULE_LICENSE("GPL");
/* struct local_info::hw_priv */
struct hostap_pci_priv {
void __iomem *mem_start;
};
/* FIX: do we need mb/wmb/rmb with memory operations? */
static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
{ 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
/* Samsung MagicLAN SWL-2210P */
{ 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
#ifdef PRISM2_IO_DEBUG
/* Traced byte write: log the access then do the MMIO write, both under
 * local->lock so the trace reflects actual bus ordering. */
static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
writeb(v, hw_priv->mem_start + a);
spin_unlock_irqrestore(&local->lock, flags);
}
/* Traced byte read: MMIO read first, then log the value returned, both
 * under local->lock. */
static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u8 v;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readb(hw_priv->mem_start + a);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
spin_unlock_irqrestore(&local->lock, flags);
return v;
}
/* Traced 16-bit write to the register window, logged under local->lock. */
static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
writew(v, hw_priv->mem_start + a);
spin_unlock_irqrestore(&local->lock, flags);
}
/* Traced 16-bit read from the register window, logged under local->lock. */
static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
{
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
local_info_t *local;
unsigned long flags;
u16 v;
iface = netdev_priv(dev);
local = iface->local;
hw_priv = local->hw_priv;
spin_lock_irqsave(&local->lock, flags);
v = readw(hw_priv->mem_start + a);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
spin_unlock_irqrestore(&local->lock, flags);
return v;
}
#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a)))
#else /* PRISM2_IO_DEBUG */
/* Byte write to the card's memory-mapped register space at offset a. */
static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	writeb(v, hw_priv->mem_start + a);
}
/* Byte read from the card's memory-mapped register space at offset a. */
static inline u8 hfa384x_inb(struct net_device *dev, int a)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	return readb(hw_priv->mem_start + a);
}
/* 16-bit write to the card's memory-mapped register space at offset a. */
static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	writew(v, hw_priv->mem_start + a);
}
/* 16-bit read from the card's memory-mapped register space at offset a. */
static inline u16 hfa384x_inw(struct net_device *dev, int a)
{
	struct hostap_interface *iface = netdev_priv(dev);
	struct hostap_pci_priv *hw_priv = iface->local->hw_priv;

	return readw(hw_priv->mem_start + a);
}
#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw(dev, (a))
#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v)))
#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a)))
#endif /* PRISM2_IO_DEBUG */
/* Stream @len bytes out of the chosen BAP data register into @buf,
 * 16 bits at a time (card byte order), odd trailing byte via byte read. */
static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
int len)
{
u16 d_off;
__le16 *pos;
d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
pos = (__le16 *) buf;
for ( ; len > 1; len -= 2)
*pos++ = HFA384X_INW_DATA(d_off);
if (len & 1)
*((char *) pos) = HFA384X_INB(d_off);
return 0;
}
/* Stream @len bytes from @buf into the chosen BAP data register,
 * 16 bits at a time, odd trailing byte via byte write. */
static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
{
u16 d_off;
__le16 *pos;
d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
pos = (__le16 *) buf;
for ( ; len > 1; len -= 2)
HFA384X_OUTW_DATA(*pos++, d_off);
if (len & 1)
HFA384X_OUTB(*((char *) pos), d_off);
return 0;
}
/* FIX: This might change at some point.. */
#include "hostap_hw.c"
/* COR soft reset: pulse sreset bit 0x0080 high/low in the PCI COR
 * register, then verify firmware init completed (CMD:BUSY cleared). */
static void prism2_pci_cor_sreset(local_info_t *local)
{
struct net_device *dev = local->dev;
u16 reg;
reg = HFA384X_INB(HFA384X_PCICOR_OFF);
printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg);
/* linux-wlan-ng uses extremely long hold and settle times for
 * COR sreset. A comment in the driver code mentions that the long
 * delays appear to be necessary. However, at least IBM 22P6901 seems
 * to work fine with shorter delays.
 *
 * Longer delays can be configured by uncommenting following line: */
/* #define PRISM2_PCI_USE_LONG_DELAYS */
#ifdef PRISM2_PCI_USE_LONG_DELAYS
int i;
HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
mdelay(250);
HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
mdelay(500);
/* Wait for f/w to complete initialization (CMD:BUSY == 0) */
i = 2000000 / 10;
while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
udelay(10);
#else /* PRISM2_PCI_USE_LONG_DELAYS */
HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
mdelay(2);
HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
mdelay(2);
#endif /* PRISM2_PCI_USE_LONG_DELAYS */
if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
}
}
/* Genesis reset: COR with sreset asserted (0x00C5), program HCR, then
 * COR with sreset deasserted (0x0045), 10 ms between each step. */
static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
{
struct net_device *dev = local->dev;
HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
mdelay(10);
HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
mdelay(10);
HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
mdelay(10);
}
static struct prism2_helper_functions prism2_pci_funcs =
{
.card_present = NULL,
.cor_sreset = prism2_pci_cor_sreset,
.genesis_reset = prism2_pci_genesis_reset,
.hw_type = HOSTAP_HW_PCI,
};
/* Probe one Prism2.5 PCI device: enable, reserve/map BAR 0, set up the
 * hostap local data, reset the chip, request the shared IRQ and run the
 * firmware configuration.  -ENOMEM/-ENODEV on failure. */
static int prism2_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
unsigned long phymem;
void __iomem *mem = NULL;
local_info_t *local = NULL;
struct net_device *dev = NULL;
static int cards_found /* = 0 */;
int irq_registered = 0;
struct hostap_interface *iface;
struct hostap_pci_priv *hw_priv;
hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
if (hw_priv == NULL)
return -ENOMEM;
if (pci_enable_device(pdev))
goto err_out_free;
phymem = pci_resource_start(pdev, 0);
if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
goto err_out_disable;
}
mem = pci_ioremap_bar(pdev, 0);
if (mem == NULL) {
printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ;
goto fail;
}
dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
&pdev->dev);
if (dev == NULL)
goto fail;
iface = netdev_priv(dev);
local = iface->local;
local->hw_priv = hw_priv;
cards_found++;
dev->irq = pdev->irq;
hw_priv->mem_start = mem;
dev->base_addr = (unsigned long) mem;
/* Reset before installing the IRQ handler to avoid a stale interrupt. */
prism2_pci_cor_sreset(local);
pci_set_drvdata(pdev, dev);
if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name,
dev)) {
printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
goto fail;
} else
irq_registered = 1;
if (!local->pri_only && prism2_hw_config(dev, 1)) {
printk(KERN_DEBUG "%s: hardware initialization failed\n",
dev_info);
goto fail;
}
printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
"mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);
return hostap_hw_ready(dev);
/* Unwind in reverse order of acquisition; 'fail' is only reached after
 * the memory region was reserved.  prism2_free_local_data() is
 * presumably NULL-safe, as dev can still be NULL here — TODO confirm. */
fail:
if (irq_registered && dev)
free_irq(dev->irq, dev);
if (mem)
iounmap(mem);
release_mem_region(phymem, pci_resource_len(pdev, 0));
err_out_disable:
pci_disable_device(pdev);
prism2_free_local_data(dev);
err_out_free:
kfree(hw_priv);
return -ENODEV;
}
/* Remove: quiesce the card, then free IRQ, local data, MMIO mapping and
 * PCI resources in reverse order of probe. */
static void prism2_pci_remove(struct pci_dev *pdev)
{
struct net_device *dev;
struct hostap_interface *iface;
void __iomem *mem_start;
struct hostap_pci_priv *hw_priv;
dev = pci_get_drvdata(pdev);
iface = netdev_priv(dev);
hw_priv = iface->local->hw_priv;
/* Reset the hardware, and ensure interrupts are disabled. */
prism2_pci_cor_sreset(iface->local);
hfa384x_disable_interrupts(dev);
if (dev->irq)
free_irq(dev->irq, dev);
/* Keep the mapping pointer alive past prism2_free_local_data(). */
mem_start = hw_priv->mem_start;
prism2_free_local_data(dev);
kfree(hw_priv);
iounmap(mem_start);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
pci_disable_device(pdev);
}
#ifdef CONFIG_PM
/* Suspend: detach from the stack, quiesce firmware, enter D3hot. */
static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (netif_running(dev)) {
netif_stop_queue(dev);
netif_device_detach(dev);
}
prism2_suspend(dev);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
/* Resume: re-enable PCI, restore config space, reconfigure firmware and
 * reattach the interface if it was running at suspend time. */
static int prism2_pci_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
int err;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
dev->name);
return err;
}
pci_restore_state(pdev);
prism2_hw_config(dev, 0);
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
}
return 0;
}
#endif /* CONFIG_PM */
MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
static struct pci_driver prism2_pci_driver = {
.name = "hostap_pci",
.id_table = prism2_pci_id_table,
.probe = prism2_pci_probe,
.remove = prism2_pci_remove,
#ifdef CONFIG_PM
.suspend = prism2_pci_suspend,
.resume = prism2_pci_resume,
#endif /* CONFIG_PM */
};
/* Module init: register the PCI driver with the PCI core. */
static int __init init_prism2_pci(void)
{
return pci_register_driver(&prism2_pci_driver);
}
/* Module exit: unregister the PCI driver; remove runs per bound device. */
static void __exit exit_prism2_pci(void)
{
pci_unregister_driver(&prism2_pci_driver);
}
module_init(init_prism2_pci);
module_exit(exit_prism2_pci);
| gpl-2.0 |
BORETS24/Kernel-for-Asus-Zenfone-2 | arch/x86/um/delay.c | 10215 | 1171 | /*
* Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
* Mostly copied from arch/x86/lib/delay.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/param.h>
/* Busy-wait for roughly @loops iterations of a dec/jnz loop (UML copy of
 * the x86 delay loop).  The .align 16 directives and jump chain keep the
 * loop body at a consistent alignment so timing is stable. */
void __delay(unsigned long loops)
{
asm volatile(
"test %0,%0\n"
"jz 3f\n"
"jmp 1f\n"
".align 16\n"
"1: jmp 2f\n"
".align 16\n"
"2: dec %0\n"
" jnz 2b\n"
"3: dec %0\n"
: /* we don't need output */
: "a" (loops)
);
}
EXPORT_SYMBOL(__delay);
/* Scale @xloops (a 2^32-based fraction of a microsecond count) into loop
 * iterations: 32x32->64 multiply with loops_per_jiffy*HZ, keeping the
 * high 32 bits (mull leaves the high half in %edx).  The *4 on xloops
 * and /4 on HZ cancel while avoiding overflow in the HZ product. */
inline void __const_udelay(unsigned long xloops)
{
int d0;
xloops *= 4;
asm("mull %%edx"
: "=d" (xloops), "=&a" (d0)
: "1" (xloops), "0"
(loops_per_jiffy * (HZ/4)));
/* ++ rounds up so we never delay less than requested. */
__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
/* Delay for @usecs microseconds; 0x000010c7 = ceil(2^32 / 10^6). */
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
/* Delay for @nsecs nanoseconds; 0x00005 = ceil(2^32 / 10^9). */
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
| gpl-2.0 |
psndna88/AGNi-pureCM | arch/tile/lib/memchr_32.c | 12263 | 2059 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
/* Word-at-a-time memchr for TILE32: scans 32-bit words using the
 * __insn_seqb (per-byte equality) and __insn_ctz (count trailing zeros)
 * intrinsics, with masking so bytes before s and after s+n-1 in the
 * boundary words cannot produce a false match before the range check. */
void *memchr(const void *s, int c, size_t n)
{
const uint32_t *last_word_ptr;
const uint32_t *p;
const char *last_byte_ptr;
uintptr_t s_int;
uint32_t goal, before_mask, v, bits;
char *ret;
if (__builtin_expect(n == 0, 0)) {
/* Don't dereference any memory if the array is empty. */
return NULL;
}
/* Get an aligned pointer. */
s_int = (uintptr_t) s;
p = (const uint32_t *)(s_int & -4);
/* Create four copies of the byte for which we are looking. */
goal = 0x01010101 * (uint8_t) c;
/* Read the first word, but munge it so that bytes before the array
 * will not match goal.
 *
 * Note that this shift count expression works because we know
 * shift counts are taken mod 32.
 */
/* (s_int << 3) mod 32 == (s_int & 3) * 8, so before_mask covers the
 * (s_int & 3) bytes preceding s within the first aligned word. */
before_mask = (1 << (s_int << 3)) - 1;
/* OR-then-XOR forces every masked (pre-array) byte to differ from the
 * corresponding byte of goal. */
v = (*p | before_mask) ^ (goal & before_mask);
/* Compute the address of the last byte. */
last_byte_ptr = (const char *)s + n - 1;
/* Compute the address of the word containing the last byte. */
last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
while ((bits = __insn_seqb(v, goal)) == 0) {
if (__builtin_expect(p == last_word_ptr, 0)) {
/* We already read the last word in the array,
 * so give up.
 */
return NULL;
}
v = *++p;
}
/* We found a match, but it might be in a byte past the end
 * of the array.
 */
ret = ((char *)p) + (__insn_ctz(bits) >> 3);
return (ret <= last_byte_ptr) ? ret : NULL;
}
EXPORT_SYMBOL(memchr);
| gpl-2.0 |
Split-Screen/android_kernel_lge_gee | arch/mn10300/mm/cache-smp-flush.c | 12263 | 4590 | /* Functions for global dcache flush when writeback caching in SMP
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "cache-smp.h"
/**
* mn10300_dcache_flush - Globally flush data cache
*
* Flush the data cache on all CPUs.
*/
void mn10300_dcache_flush(void)
{
unsigned long flags;
flags = smp_lock_cache();
/* Flush locally, then IPI the other CPUs to do the same. */
mn10300_local_dcache_flush();
smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_page - Globally flush a page of data cache
* @start: The address of the page of memory to be flushed.
*
* Flush a range of addresses in the data cache on all CPUs covering
* the page that includes the given address.
*/
void mn10300_dcache_flush_page(unsigned long start)
{
unsigned long flags;
/* Round down to the containing page boundary. */
start &= ~(PAGE_SIZE-1);
flags = smp_lock_cache();
mn10300_local_dcache_flush_page(start);
smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_range - Globally flush range of data cache
* @start: The start address of the region to be flushed.
* @end: The end address of the region to be flushed.
*
* Flush a range of addresses in the data cache on all CPUs, between start and
* end-1 inclusive.
*/
void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
{
unsigned long flags;
flags = smp_lock_cache();
/* Local flush first, then broadcast the same range to other CPUs. */
mn10300_local_dcache_flush_range(start, end);
smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_range2 - Globally flush range of data cache
* @start: The start address of the region to be flushed.
* @size: The size of the region to be flushed.
*
* Flush a range of addresses in the data cache on all CPUs, between start and
* start+size-1 inclusive.
*/
void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_range2(start, size);
/* Remote call takes (start, end), so convert size to an end address. */
smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv - Globally flush and invalidate data cache
*
* Flush and invalidate the data cache on all CPUs.
*/
void mn10300_dcache_flush_inv(void)
{
unsigned long flags;
flags = smp_lock_cache();
/* Flush+invalidate locally, then IPI the other CPUs. */
mn10300_local_dcache_flush_inv();
smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
* cache
* @start: The address of the page of memory to be flushed and invalidated.
*
* Flush and invalidate a range of addresses in the data cache on all CPUs
* covering the page that includes the given address.
*/
void mn10300_dcache_flush_inv_page(unsigned long start)
{
unsigned long flags;
/* Round down to the containing page boundary. */
start &= ~(PAGE_SIZE-1);
flags = smp_lock_cache();
mn10300_local_dcache_flush_inv_page(start);
smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
* cache
* @start: The start address of the region to be flushed and invalidated.
* @end: The end address of the region to be flushed and invalidated.
*
* Flush and invalidate a range of addresses in the data cache on all CPUs,
* between start and end-1 inclusive.
*/
void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_inv_range(start, end);
smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
* cache
* @start: The start address of the region to be flushed and invalidated.
* @size: The size of the region to be flushed and invalidated.
*
* Flush and invalidate a range of addresses in the data cache on all CPUs,
* between start and start+size-1 inclusive.
*/
void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_dcache_flush_inv_range2(start, size);
/* Remote call takes (start, end), so convert size to an end address. */
smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
smp_unlock_cache(flags);
}
| gpl-2.0 |
telf/TDR_watchdog_RFC_1 | arch/mn10300/mm/cache-smp-inv.c | 12263 | 4428 | /* Functions for global i/dcache invalidation when caching in SMP
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "cache-smp.h"
/**
* mn10300_icache_inv - Globally invalidate instruction cache
*
* Invalidate the instruction cache on all CPUs.
*/
void mn10300_icache_inv(void)
{
unsigned long flags;
flags = smp_lock_cache();
/* Invalidate locally, then IPI the other CPUs to do the same. */
mn10300_local_icache_inv();
smp_cache_call(SMP_ICACHE_INV, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_icache_inv_page - Globally invalidate a page of instruction cache
* @start: The address of the page of memory to be invalidated.
*
* Invalidate a range of addresses in the instruction cache on all CPUs
* covering the page that includes the given address.
*/
void mn10300_icache_inv_page(unsigned long start)
{
unsigned long flags;
/* Round down to the containing page boundary. */
start &= ~(PAGE_SIZE-1);
flags = smp_lock_cache();
mn10300_local_icache_inv_page(start);
smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
smp_unlock_cache(flags);
}
/**
* mn10300_icache_inv_range - Globally invalidate range of instruction cache
* @start: The start address of the region to be invalidated.
* @end: The end address of the region to be invalidated.
*
* Invalidate a range of addresses in the instruction cache on all CPUs,
* between start and end-1 inclusive.
*/
void mn10300_icache_inv_range(unsigned long start, unsigned long end)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_icache_inv_range(start, end);
smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
smp_unlock_cache(flags);
}
/**
* mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
* @start: The start address of the region to be invalidated.
* @size: The size of the region to be invalidated.
*
* Invalidate a range of addresses in the instruction cache on all CPUs,
* between start and start+size-1 inclusive.
*/
void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
{
unsigned long flags;
flags = smp_lock_cache();
mn10300_local_icache_inv_range2(start, size);
/* Remote call takes (start, end), so convert size to an end address. */
smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_inv - Globally invalidate data cache
*
* Invalidate the data cache on all CPUs.
*/
void mn10300_dcache_inv(void)
{
unsigned long flags;
flags = smp_lock_cache();
/* Invalidate locally, then IPI the other CPUs to do the same. */
mn10300_local_dcache_inv();
smp_cache_call(SMP_DCACHE_INV, 0, 0);
smp_unlock_cache(flags);
}
/**
* mn10300_dcache_inv_page - Globally invalidate a page of data cache
* @start: The address of the page of memory to be invalidated.
*
* Invalidate a range of addresses in the data cache on all CPUs covering the
* page that includes the given address.
*/
void mn10300_dcache_inv_page(unsigned long start)
{
	unsigned long psw;

	/* Round down to the base of the page containing the address. */
	start &= ~(PAGE_SIZE - 1);

	psw = smp_lock_cache();
	mn10300_local_dcache_inv_page(start);
	/* Have the other CPUs invalidate the same page-sized range. */
	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(psw);
}
/**
* mn10300_dcache_inv_range - Globally invalidate range of data cache
* @start: The start address of the region to be invalidated.
* @end: The end address of the region to be invalidated.
*
* Invalidate a range of addresses in the data cache on all CPUs, between start
* and end-1 inclusive.
*/
void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long psw = smp_lock_cache();

	/* Local invalidation of [start, end), then the same via IPI. */
	mn10300_local_dcache_inv_range(start, end);
	smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
	smp_unlock_cache(psw);
}
/**
* mn10300_dcache_inv_range2 - Globally invalidate range of data cache
* @start: The start address of the region to be invalidated.
* @size: The size of the region to be invalidated.
*
* Invalidate a range of addresses in the data cache on all CPUs, between start
* and start+size-1 inclusive.
*/
void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
{
	unsigned long psw = smp_lock_cache();

	/* Same as mn10300_dcache_inv_range() but takes a length rather
	 * than an end address. */
	mn10300_local_dcache_inv_range2(start, size);
	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
	smp_unlock_cache(psw);
}
| gpl-2.0 |
sssemil/android_kernel_samsung_i9105 | arch/sh/drivers/pci/ops-sh5.c | 12775 | 1501 | /*
* Support functions for the SH5 PCI hardware.
*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/pci.h>
#include <asm/io.h>
#include "pci-sh5.h"
/*
 * Read @size bytes of PCI configuration space at (@bus, @devfn, @where)
 * into *@val.  Always reports PCIBIOS_SUCCESSFUL, matching the original
 * behaviour even for unexpected sizes.
 */
static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		       int size, u32 *val)
{
	/* Latch the target config address, then read the data register
	 * with an access width matching the request; the low address
	 * bits select the byte/halfword lane. */
	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));

	if (size == 1)
		*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
	else if (size == 2)
		*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
	else if (size == 4)
		*val = SH5PCI_READ(PDR);

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes of @val to PCI configuration space at
 * (@bus, @devfn, @where).  Always reports PCIBIOS_SUCCESSFUL.
 */
static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
			int size, u32 val)
{
	/* Latch the target config address, then write the data register
	 * with an access width matching the request. */
	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));

	if (size == 1)
		SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
	else if (size == 2)
		SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
	else if (size == 4)
		SH5PCI_WRITE(PDR, val);

	return PCIBIOS_SUCCESSFUL;
}
/* Config-space access methods handed to the generic PCI core. */
struct pci_ops sh5_pci_ops = {
	.read = sh5pci_read,
	.write = sh5pci_write,
};
| gpl-2.0 |
febycv/htc_kernel_creamed_glacier | arch/sh/drivers/pci/ops-sh5.c | 12775 | 1501 | /*
* Support functions for the SH5 PCI hardware.
*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/pci.h>
#include <asm/io.h>
#include "pci-sh5.h"
/*
 * Read @size bytes of PCI configuration space at (@bus, @devfn, @where)
 * into *@val.
 */
static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
			int size, u32 *val)
{
	/* Latch the target config address, then read the data register
	 * with a width matching the request. */
	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
	switch (size) {
	case 1:
		/* low address bits pick the byte lane */
		*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
		break;
	case 2:
		*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
		break;
	case 4:
		*val = SH5PCI_READ(PDR);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write @size bytes of @val to PCI configuration space at
 * (@bus, @devfn, @where).
 */
static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
			int size, u32 val)
{
	/* Latch the target config address, then write the data register
	 * with a width matching the request. */
	SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
	switch (size) {
	case 1:
		/* low address bits pick the byte lane */
		SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
		break;
	case 2:
		SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
		break;
	case 4:
		SH5PCI_WRITE(PDR, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
/* Config-space access methods handed to the generic PCI core. */
struct pci_ops sh5_pci_ops = {
	.read = sh5pci_read,
	.write = sh5pci_write,
};
| gpl-2.0 |
mcardielo/ThunderG-Kernel_2.6.35 | arch/alpha/kernel/err_ev7.c | 13543 | 8187 | /*
* linux/arch/alpha/kernel/err_ev7.c
*
* Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
*
* Error handling code supporting Alpha systems
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/smp.h>
#include <asm/err_common.h>
#include <asm/err_ev7.h>
#include "err_impl.h"
#include "proto.h"
struct ev7_lf_subpackets *
ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr,
struct ev7_lf_subpackets *lf_subpackets)
{
struct el_subpacket *subpacket;
int i;
/*
* A Marvel machine check frame is always packaged in an
* el_subpacket of class HEADER, type LOGOUT_FRAME.
*/
if (el_ptr->class != EL_CLASS__HEADER ||
el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME)
return NULL;
/*
* It is a logout frame header. Look at the one subpacket.
*/
el_ptr = (struct el_subpacket *)
((unsigned long)el_ptr + el_ptr->length);
/*
* It has to be class PAL, type LOGOUT_FRAME.
*/
if (el_ptr->class != EL_CLASS__PAL ||
el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME)
return NULL;
lf_subpackets->logout = (struct ev7_pal_logout_subpacket *)
el_ptr->by_type.raw.data_start;
/*
* Process the subpackets.
*/
subpacket = (struct el_subpacket *)
((unsigned long)el_ptr + el_ptr->length);
for (i = 0;
subpacket && i < lf_subpackets->logout->subpacket_count;
subpacket = (struct el_subpacket *)
((unsigned long)subpacket + subpacket->length), i++) {
/*
* All subpackets should be class PAL.
*/
if (subpacket->class != EL_CLASS__PAL) {
printk("%s**UNEXPECTED SUBPACKET CLASS %d "
"IN LOGOUT FRAME (packet %d\n",
err_print_prefix, subpacket->class, i);
return NULL;
}
/*
* Remember the subpacket.
*/
switch(subpacket->type) {
case EL_TYPE__PAL__EV7_PROCESSOR:
lf_subpackets->ev7 =
(struct ev7_pal_processor_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__EV7_RBOX:
lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__EV7_ZBOX:
lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__EV7_IO:
lf_subpackets->io = (struct ev7_pal_io_subpacket *)
subpacket->by_type.raw.data_start;
break;
case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE:
case EL_TYPE__PAL__ENV__AIRMOVER_FAN:
case EL_TYPE__PAL__ENV__VOLTAGE:
case EL_TYPE__PAL__ENV__INTRUSION:
case EL_TYPE__PAL__ENV__POWER_SUPPLY:
case EL_TYPE__PAL__ENV__LAN:
case EL_TYPE__PAL__ENV__HOT_PLUG:
lf_subpackets->env[ev7_lf_env_index(subpacket->type)] =
(struct ev7_pal_environmental_subpacket *)
subpacket->by_type.raw.data_start;
break;
default:
/*
* Don't know what kind of frame this is.
*/
return NULL;
}
}
return lf_subpackets;
}
/*
 * ev7_machine_check - machine-check entry point for EV7 (Marvel) systems
 * @vector: SCB vector of the check (SCB_Q_PROCERR is reported as
 *          "Correctable"; anything else as "Uncorrectable")
 * @la_ptr: address of the logout area describing the error
 */
void
ev7_machine_check(unsigned long vector, unsigned long la_ptr)
{
	struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
	char *saved_err_prefix = err_print_prefix;

	/*
	 * Sync the processor
	 */
	mb();
	draina();

	/* Force KERN_CRIT output for the duration of the report, then
	 * restore whatever prefix was active before. */
	err_print_prefix = KERN_CRIT;
	printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n",
	       err_print_prefix,
	       (vector == SCB_Q_PROCERR) ? "Correctable" : "Uncorrectable",
	       (unsigned int)vector, (int)smp_processor_id());
	el_process_subpacket(el_ptr);
	err_print_prefix = saved_err_prefix;

	/*
	 * Release the logout frame
	 */
	wrmces(0x7);
	mb();
}
/* Field names used to annotate each quadword of an EV7 processor
 * subpacket when it is dumped; NULL terminates the list. */
static char *el_ev7_processor_subpacket_annotation[] = {
	"Subpacket Header",	"I_STAT",	"DC_STAT",
	"C_ADDR",		"C_SYNDROME_1",	"C_SYNDROME_0",
	"C_STAT",		"C_STS",	"MM_STAT",
	"EXC_ADDR",		"IER_CM",	"ISUM",
	"PAL_BASE",		"I_CTL",	"PROCESS_CONTEXT",
	"CBOX_CTL",		"CBOX_STP_CTL",	"CBOX_ACC_CTL",
	"CBOX_LCL_SET",		"CBOX_GLB_SET",	"BBOX_CTL",
	"BBOX_ERR_STS",		"BBOX_ERR_IDX",	"CBOX_DDP_ERR_STS",
	"BBOX_DAT_RMP",		NULL
};

/* Annotations for the ZBOX (memory controller) subpacket. */
static char *el_ev7_zbox_subpacket_annotation[] = {
	"Subpacket Header",
	"ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
	"ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
	"ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR",
	"ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL",
	"ZBOX(0): reserved / DIFT_ERR_STATUS",
	"ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
	"ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
	"ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR",
	"ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL",
	"ZBOX(1): reserved / DIFT_ERR_STATUS",
	"CBOX_CTL",		"CBOX_STP_CTL",
	"ZBOX(0)_ERROR_PA",	"ZBOX(1)_ERROR_PA",
	"ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME",
	NULL
};

/* Annotations for the RBOX (router) subpacket. */
static char *el_ev7_rbox_subpacket_annotation[] = {
	"Subpacket Header",	"RBOX_CFG",	"RBOX_N_CFG",
	"RBOX_S_CFG",		"RBOX_E_CFG",	"RBOX_W_CFG",
	"RBOX_N_ERR",		"RBOX_S_ERR",	"RBOX_E_ERR",
	"RBOX_W_ERR",		"RBOX_IO_CFG",	"RBOX_IO_ERR",
	"RBOX_L_ERR",		"RBOX_WHOAMI",	"RBOX_IMASL",
	"RBOX_INTQ",		"RBOX_INT",	NULL
};

/* Annotations for the IO7 subpacket. */
static char *el_ev7_io_subpacket_annotation[] = {
	"Subpacket Header",	"IO_ASIC_REV",	"IO_SYS_REV",
	"IO7_UPH",		"HPI_CTL",	"CRD_CTL",
	"HEI_CTL",		"PO7_ERROR_SUM","PO7_UNCRR_SYM",
	"PO7_CRRCT_SYM",	"PO7_UGBGE_SYM","PO7_ERR_PKT0",
	"PO7_ERR_PKT1",		"reserved",	"reserved",
	"PO0_ERR_SUM",		"PO0_TLB_ERR",	"PO0_SPL_COMPLT",
	"PO0_TRANS_SUM",	"PO0_FIRST_ERR","PO0_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	"PO1_ERR_SUM",		"PO1_TLB_ERR",	"PO1_SPL_COMPLT",
	"PO1_TRANS_SUM",	"PO1_FIRST_ERR","PO1_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	"PO2_ERR_SUM",		"PO2_TLB_ERR",	"PO2_SPL_COMPLT",
	"PO2_TRANS_SUM",	"PO2_FIRST_ERR","PO2_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	"PO3_ERR_SUM",		"PO3_TLB_ERR",	"PO3_SPL_COMPLT",
	"PO3_TRANS_SUM",	"PO3_FIRST_ERR","PO3_MULT_ERR",
	"DM CSR PH",		"DM CSR PH",	"DM CSR PH",
	"DM CSR PH",		"reserved",
	NULL
};

/* Binds each subpacket type to its annotation table; registered with the
 * common error-log code by ev7_register_error_handlers(). */
static struct el_subpacket_annotation el_ev7_pal_annotations[] = {
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_PROCESSOR,
			     1,
			     "EV7 Processor Subpacket",
			     el_ev7_processor_subpacket_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_ZBOX,
			     1,
			     "EV7 ZBOX Subpacket",
			     el_ev7_zbox_subpacket_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_RBOX,
			     1,
			     "EV7 RBOX Subpacket",
			     el_ev7_rbox_subpacket_annotation),
	SUBPACKET_ANNOTATION(EL_CLASS__PAL,
			     EL_TYPE__PAL__EV7_IO,
			     1,
			     "EV7 IO Subpacket",
			     el_ev7_io_subpacket_annotation)
};
/*
 * ev7_process_pal_subpacket - print a class-PAL subpacket.
 * @header: the subpacket to process
 *
 * Logout frames get a full decode of whom the machine check hit plus a
 * walk of their nested subpackets; any other PAL type is just annotated.
 * Returns a pointer just past @header so the caller can keep walking.
 */
static struct el_subpacket *
ev7_process_pal_subpacket(struct el_subpacket *header)
{
	struct ev7_pal_subpacket *packet;

	if (header->class != EL_CLASS__PAL) {
		printk("%s  ** Unexpected header CLASS %d TYPE %d, aborting\n",
		       err_print_prefix,
		       header->class, header->type);
		return NULL;
	}

	packet = (struct ev7_pal_subpacket *)header->by_type.raw.data_start;

	switch(header->type) {
	case EL_TYPE__PAL__LOGOUT_FRAME:
		printk("%s*** MCHK occurred on LPID %lld (RBOX %llx)\n",
		       err_print_prefix,
		       packet->by_type.logout.whami,
		       packet->by_type.logout.rbox_whami);
		el_print_timestamp(&packet->by_type.logout.timestamp);
		printk("%s  EXC_ADDR: %016llx\n"
		       "  HALT_CODE: %llx\n",
		       err_print_prefix,
		       packet->by_type.logout.exc_addr,
		       packet->by_type.logout.halt_code);
		/* recurse into the subpackets nested inside the frame */
		el_process_subpackets(header,
				      packet->by_type.logout.subpacket_count);
		break;
	default:
		printk("%s  ** PAL TYPE %d SUBPACKET\n",
		       err_print_prefix,
		       header->type);
		el_annotate_subpacket(header);
		break;
	}

	return (struct el_subpacket *)((unsigned long)header + header->length);
}
/* Handler descriptor routing class-PAL subpackets to the decoder above. */
struct el_subpacket_handler ev7_pal_subpacket_handler =
	SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
/*
 * Register the EV7 annotation tables and the PAL subpacket handler with
 * the common error-log code at boot.
 */
void __init
ev7_register_error_handlers(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(el_ev7_pal_annotations); idx++)
		cdl_register_subpacket_annotation(&el_ev7_pal_annotations[idx]);

	cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
}
| gpl-2.0 |
Haxynox/kernel_samsung_n7100-old | drivers/dio/dio.c | 14311 | 8573 | /* Code to support devices on the DIO and DIO-II bus
* Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
* Copyright (C) 2004 Jochen Friedrich <jochen@scram.de>
*
* This code has basically these routines at the moment:
* int dio_find(u_int deviceid)
* Search the list of DIO devices and return the select code
* of the next unconfigured device found that matches the given device ID.
* Note that the deviceid parameter should be the encoded ID.
* This means that framebuffers should pass it as
* DIO_ENCODE_ID(DIO_ID_FBUFFER,DIO_ID2_TOPCAT)
* (or whatever); everybody else just uses DIO_ID_FOOBAR.
* unsigned long dio_scodetophysaddr(int scode)
* Return the physical address corresponding to the given select code.
* int dio_scodetoipl(int scode)
* Every DIO card has a fixed interrupt priority level. This function
* returns it, whatever it is.
* const char *dio_scodetoname(int scode)
* Return a character string describing this board [might be "" if
* not CONFIG_DIO_CONSTANTS]
* void dio_config_board(int scode) mark board as configured in the list
* void dio_unconfig_board(int scode) mark board as no longer configured
*
* This file is based on the way the Amiga port handles Zorro II cards,
* although we aren't so complicated...
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dio.h>
#include <linux/slab.h> /* kmalloc() */
#include <asm/uaccess.h>
#include <asm/io.h> /* readb() */
/* The single DIO bus in an HP300 system, with the memory windows
 * claimed for plain DIO and DIO-II boards. */
struct dio_bus dio_bus = {
	.resources = {
		/* DIO range */
		{ .name = "DIO mem", .start = 0x00600000, .end = 0x007fffff },
		/* DIO-II range */
		{ .name = "DIO-II mem", .start = 0x01000000, .end = 0x1fffffff }
	},
	.name = "DIO bus"
};
/* not a real config option yet! */
#define CONFIG_DIO_CONSTANTS
#ifdef CONFIG_DIO_CONSTANTS
/* We associate each numeric ID with an appropriate descriptive string
* using a constant array of these structs.
* FIXME: we should be able to arrange to throw away most of the strings
* using the initdata stuff. Then we wouldn't need to worry about
* carrying them around...
* I think we do this by copying them into newly kmalloc()ed memory and
* marking the names[] array as .initdata ?
*/
/* Pairing of a numeric DIO board ID with its descriptive name. */
struct dioname
{
	int id;
	const char *name;
};
/* useful macro */
#define DIONAME(x) { DIO_ID_##x, DIO_DESC_##x }
#define DIOFBNAME(x) { DIO_ENCODE_ID( DIO_ID_FBUFFER, DIO_ID2_##x), DIO_DESC2_##x }
/* Table of every board ID we know how to describe; searched linearly by
 * dio_getname(). */
static struct dioname names[] =
{
	DIONAME(DCA0), DIONAME(DCA0REM), DIONAME(DCA1), DIONAME(DCA1REM),
	DIONAME(DCM), DIONAME(DCMREM),
	DIONAME(LAN),
	DIONAME(FHPIB), DIONAME(NHPIB),
	DIONAME(SCSI0), DIONAME(SCSI1), DIONAME(SCSI2), DIONAME(SCSI3),
	DIONAME(FBUFFER),
	DIONAME(PARALLEL), DIONAME(VME), DIONAME(DCL), DIONAME(DCLREM),
	DIONAME(MISC0), DIONAME(MISC1), DIONAME(MISC2), DIONAME(MISC3),
	DIONAME(MISC4), DIONAME(MISC5), DIONAME(MISC6), DIONAME(MISC7),
	DIONAME(MISC8), DIONAME(MISC9), DIONAME(MISC10), DIONAME(MISC11),
	DIONAME(MISC12), DIONAME(MISC13),
	DIOFBNAME(GATORBOX), DIOFBNAME(TOPCAT), DIOFBNAME(RENAISSANCE),
	DIOFBNAME(LRCATSEYE), DIOFBNAME(HRCCATSEYE), DIOFBNAME(HRMCATSEYE),
	DIOFBNAME(DAVINCI), DIOFBNAME(XXXCATSEYE), DIOFBNAME(HYPERION),
	DIOFBNAME(XGENESIS), DIOFBNAME(TIGER), DIOFBNAME(YGENESIS)
};
#undef DIONAME
#undef DIOFBNAME
static const char *unknowndioname
= "unknown DIO board -- please email <linux-m68k@lists.linux-m68k.org>!";
/*
 * Map a (possibly encoded) board ID to its descriptive string; returns
 * a "please report this" placeholder for boards we do not recognize.
 */
static const char *dio_getname(int id)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(names); idx++) {
		if (names[idx].id == id)
			return names[idx].name;
	}

	return unknowndioname;
}
#else
static char dio_no_name[] = { 0 };
#define dio_getname(_id) (dio_no_name)
#endif /* CONFIG_DIO_CONSTANTS */
int __init dio_find(int deviceid)
{
	/* Called to find a DIO device before the full bus scan has run.
	 * Only used by the console driver.
	 * Returns the select code of the first matching board, or -1.
	 */
	int scode, id;
	u_char prid, secid, i;
	mm_segment_t fs;

	for (scode = 0; scode < DIO_SCMAX; scode++) {
		void *va;
		unsigned long pa;

		/* Skip select codes with no slot behind them. */
		if (DIO_SCINHOLE(scode))
			continue;

		pa = dio_scodetophysaddr(scode);

		if (!pa)
			continue;

		/* Plain DIO is permanently mapped; DIO-II boards must be
		 * ioremapped for the duration of the probe. */
		if (scode < DIOII_SCBASE)
			va = (void *)(pa + DIO_VIRADDRBASE);
		else
			va = ioremap(pa, PAGE_SIZE);

		/* Probe the ID byte via get_user() so a bus error from an
		 * empty slot is caught instead of crashing the kernel. */
		fs = get_fs();
		set_fs(KERNEL_DS);

		if (get_user(i, (unsigned char *)va + DIO_IDOFF)) {
			set_fs(fs);
			if (scode >= DIOII_SCBASE)
				iounmap(va);
			continue;             /* no board present at that select code */
		}

		set_fs(fs);

		/* Some boards need a secondary ID byte folded in to form
		 * the full device ID. */
		prid = DIO_ID(va);

		if (DIO_NEEDSSECID(prid)) {
			secid = DIO_SECID(va);
			id = DIO_ENCODE_ID(prid, secid);
		} else
			id = prid;

		if (id == deviceid) {
			if (scode >= DIOII_SCBASE)
				iounmap(va);
			return scode;
		}
	}

	return -1;
}
/* This is the function that scans the DIO space and works out what
* hardware is actually present.
*/
static int __init dio_init(void)
{
	/* Scan every select code, register a dio_dev for each board found
	 * and hook it into the driver model. */
	int scode;
	mm_segment_t fs;
	int i;
	struct dio_dev *dev;
	int error;

	if (!MACH_IS_HP300)
		return 0;

	printk(KERN_INFO "Scanning for DIO devices...\n");

	/* Initialize the DIO bus */
	INIT_LIST_HEAD(&dio_bus.devices);
	dev_set_name(&dio_bus.dev, "dio");
	error = device_register(&dio_bus.dev);
	if (error) {
		pr_err("DIO: Error registering dio_bus\n");
		return error;
	}

	/* Request all resources */
	/* The HP 320 has no DIO-II window, so only claim the first range. */
	dio_bus.num_resources = (hp300_model == HP_320 ? 1 : 2);
	for (i = 0; i < dio_bus.num_resources; i++)
		request_resource(&iomem_resource, &dio_bus.resources[i]);

	/* Register all devices */
	for (scode = 0; scode < DIO_SCMAX; ++scode)
	{
		u_char prid, secid = 0;	/* primary, secondary ID bytes */
		u_char *va;
		unsigned long pa;

		if (DIO_SCINHOLE(scode))
			continue;

		pa = dio_scodetophysaddr(scode);

		if (!pa)
			continue;

		/* Plain DIO is permanently mapped; DIO-II must be
		 * ioremapped for the duration of the probe. */
		if (scode < DIOII_SCBASE)
			va = (void *)(pa + DIO_VIRADDRBASE);
		else
			va = ioremap(pa, PAGE_SIZE);

		/* Probe the ID byte via get_user() so a bus error from an
		 * empty slot is caught instead of crashing. */
		fs = get_fs();
		set_fs(KERNEL_DS);

		if (get_user(i, (unsigned char *)va + DIO_IDOFF)) {
			set_fs(fs);
			if (scode >= DIOII_SCBASE)
				iounmap(va);
			continue;             /* no board present at that select code */
		}

		set_fs(fs);

		/* Found a board, allocate it an entry in the list */
		dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL);
		if (!dev)
			/* NOTE(review): returns 0 (success) on OOM, silently
			 * stopping the scan — confirm this is intentional. */
			return 0;

		dev->bus = &dio_bus;
		dev->dev.parent = &dio_bus.dev;
		dev->dev.bus = &dio_bus_type;
		dev->scode = scode;
		dev->resource.start = pa;
		dev->resource.end = pa + DIO_SIZE(scode, va);
		dev_set_name(&dev->dev, "%02x", scode);

		/* read the ID byte(s) and encode if necessary. */
		prid = DIO_ID(va);

		if (DIO_NEEDSSECID(prid)) {
			secid = DIO_SECID(va);
			dev->id = DIO_ENCODE_ID(prid, secid);
		} else
			dev->id = prid;

		dev->ipl = DIO_IPL(va);
		strcpy(dev->name,dio_getname(dev->id));
		printk(KERN_INFO "select code %3d: ipl %d: ID %02X", dev->scode, dev->ipl, prid);
		if (DIO_NEEDSSECID(prid))
			printk(":%02X", secid);
		printk(": %s\n", dev->name);

		if (scode >= DIOII_SCBASE)
			iounmap(va);

		error = device_register(&dev->dev);
		if (error) {
			pr_err("DIO: Error registering device %s\n",
			       dev->name);
			continue;
		}
		error = dio_create_sysfs_dev_files(dev);
		if (error)
			dev_err(&dev->dev, "Error creating sysfs files\n");
	}
	return 0;
}
subsys_initcall(dio_init);
/* Bear in mind that this is called in the very early stages of initialisation
* in order to get the address of the serial port for the console...
*/
/*
 * Translate a select code into the physical base address of the board,
 * or 0 for select codes that are out of range or fall in the hole.
 */
unsigned long dio_scodetophysaddr(int scode)
{
	/* DIO-II boards live in their own window above DIOII_SCBASE. */
	if (scode >= DIOII_SCBASE)
		return DIOII_BASE + (scode - 132) * DIOII_DEVSIZE;

	/* Out-of-range or in-hole select codes have no address. */
	if (scode > DIO_SCMAX || scode < 0 || DIO_SCINHOLE(scode))
		return 0;

	return DIO_BASE + scode * DIO_DEVSIZE;
}
| gpl-2.0 |
JustAkan/jolla_kernel_f220k | arch/arm/mach-msm/msm_bus/msm_bus_config.c | 488 | 2331 | /* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/clk.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include "msm_bus_core.h"
static DEFINE_MUTEX(msm_bus_config_lock);
/**
* msm_bus_axi_porthalt() - Halt the given axi master port
* @master_port: AXI Master port to be halted
*/
int msm_bus_axi_porthalt(int master_port)
{
	struct msm_bus_fabric_device *fabdev;
	int priv_id;
	int rc;

	/* Translate the master port into its internal fabric id. */
	priv_id = msm_bus_board_get_iid(master_port);

	MSM_BUS_DBG("master_port: %d iid: %d fabid%d\n",
		master_port, priv_id, GET_FABID(priv_id));

	fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id));
	if (IS_ERR(fabdev)) {
		MSM_BUS_ERR("Fabric device not found for mport: %d\n",
			master_port);
		return -ENODEV;
	}

	/* Serialize halt requests against other configuration calls. */
	mutex_lock(&msm_bus_config_lock);
	rc = fabdev->algo->port_halt(fabdev, priv_id);
	mutex_unlock(&msm_bus_config_lock);

	return rc;
}
EXPORT_SYMBOL(msm_bus_axi_porthalt);
/**
* msm_bus_axi_portunhalt() - Unhalt the given axi master port
* @master_port: AXI Master port to be unhalted
*/
int msm_bus_axi_portunhalt(int master_port)
{
	struct msm_bus_fabric_device *fabdev;
	int priv_id;
	int rc;

	/* Translate the master port into its internal fabric id. */
	priv_id = msm_bus_board_get_iid(master_port);

	MSM_BUS_DBG("master_port: %d iid: %d fabid: %d\n",
		master_port, priv_id, GET_FABID(priv_id));

	fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id));
	if (IS_ERR(fabdev)) {
		MSM_BUS_ERR("Fabric device not found for mport: %d\n",
			master_port);
		return -ENODEV;
	}

	/* Serialize unhalt requests against other configuration calls. */
	mutex_lock(&msm_bus_config_lock);
	rc = fabdev->algo->port_unhalt(fabdev, priv_id);
	mutex_unlock(&msm_bus_config_lock);

	return rc;
}
EXPORT_SYMBOL(msm_bus_axi_portunhalt);
| gpl-2.0 |
dandel/linux-2.6.32.y | fs/xfs/xfs_attr.c | 488 | 60671 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
/*
* xfs_attr.c
*
* Provide the external interfaces to manage attribute lists.
*/
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
/*
* Internal routines when attribute list fits inside the inode.
*/
STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
/*
* Internal routines when attribute list is one block.
*/
STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
/*
* Internal routines when attribute list is more than one block.
*/
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context);
STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
/*
* Routines to manipulate out-of-line attribute values.
*/
STATIC int xfs_attr_rmtval_set(xfs_da_args_t *args);
STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
#if defined(XFS_ATTR_TRACE)
ktrace_t *xfs_attr_trace_buf;
#endif
STATIC int
xfs_attr_name_to_xname(
struct xfs_name *xname,
const char *aname)
{
if (!aname)
return EINVAL;
xname->name = aname;
xname->len = strlen(aname);
if (xname->len >= MAXNAMELEN)
return EFAULT; /* match IRIX behaviour */
return 0;
}
/*
 * Does this inode have any extended attributes at all?
 */
STATIC int
xfs_inode_hasattr(
	struct xfs_inode	*ip)
{
	/* No attribute fork means no attributes. */
	if (!XFS_IFORK_Q(ip))
		return 0;

	/* An extent-format attr fork with zero extents is empty too. */
	if (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
	    ip->i_d.di_anextents == 0)
		return 0;

	return 1;
}
/*========================================================================
* Overall external interface routines.
*========================================================================*/
/*
 * Look up attribute @name on @ip and copy its value into @value.
 * *valuelenp carries the caller's buffer size in and the attribute
 * value size out.  Caller holds the inode lock (see xfs_attr_get()).
 */
int
xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name,
	       char *value, int *valuelenp, int flags)
{
	xfs_da_args_t	args;
	int		error;

	if (!xfs_inode_hasattr(ip))
		return ENOATTR;

	/*
	 * Fill in the arg structure for this request.
	 */
	memset((char *)&args, 0, sizeof(args));
	args.name = name->name;
	args.namelen = name->len;
	args.value = value;
	args.valuelen = *valuelenp;
	args.flags = flags;
	args.hashval = xfs_da_hashname(args.name, args.namelen);
	args.dp = ip;
	args.whichfork = XFS_ATTR_FORK;

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_getvalue(&args);
	} else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_get(&args);
	} else {
		error = xfs_attr_node_get(&args);
	}

	/*
	 * Return the number of bytes in the value to the caller.
	 */
	*valuelenp = args.valuelen;

	/* EEXIST from the lookup means "found" here, not a failure. */
	if (error == EEXIST)
		error = 0;
	return(error);
}
/*
 * External interface: fetch attribute @name from @ip, taking the inode
 * lock shared around the lookup.
 */
int
xfs_attr_get(
	xfs_inode_t	*ip,
	const char	*name,
	char		*value,
	int		*valuelenp,
	int		flags)
{
	struct xfs_name	xname;
	int		error;

	XFS_STATS_INC(xs_attr_get);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EIO;

	error = xfs_attr_name_to_xname(&xname, name);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return error;
}
/*
* Calculate how many blocks we need for the new attribute,
*/
int
xfs_attr_calc_size(
	struct xfs_inode	*ip,
	int			namelen,
	int			valuelen,
	int			*local)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			entsize;
	int			nblks;

	/*
	 * Ask the leaf code how big the new entry will be, and whether
	 * the value lives "local" in the leaf block or "remote" in its
	 * own blocks (note: local != inline).
	 */
	entsize = xfs_attr_leaf_newentsize(namelen, valuelen,
					   mp->m_sb.sb_blocksize, local);

	nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
	if (*local) {
		/* A large local entry may force a double split. */
		if (entsize > (mp->m_sb.sb_blocksize >> 1))
			nblks *= 2;
	} else {
		/*
		 * Remote value: no double split possible, but reserve
		 * space for the value blocks and the bmap records that
		 * map them.
		 */
		uint dblocks = XFS_B_TO_FSB(mp, valuelen);

		nblks += dblocks;
		nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
	}

	return nblks;
}
/*
 * Set attribute @name = @value on @dp, creating the attribute fork and
 * converting shortform -> leaf as needed.  Runs a chain of linked
 * transactions; the inode stays locked (and joined to each transaction)
 * for the whole sequence.  @flags carries ATTR_* modifiers (ATTR_ROOT
 * allows use of reserved blocks; ATTR_KERNOTIME suppresses the ctime
 * update).
 */
STATIC int
xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
	char *value, int valuelen, int flags)
{
	xfs_da_args_t	args;
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t	flist;
	int		error, err2, committed;
	xfs_mount_t	*mp = dp->i_mount;
	int		rsvd = (flags & ATTR_ROOT) != 0;
	int		local;

	/*
	 * Attach the dquots to the inode.
	 */
	error = xfs_qm_dqattach(dp, 0);
	if (error)
		return error;

	/*
	 * If the inode doesn't have an attribute fork, add one.
	 * (inode must not be locked when we call this routine)
	 */
	if (XFS_IFORK_Q(dp) == 0) {
		int sf_size = sizeof(xfs_attr_sf_hdr_t) +
			      XFS_ATTR_SF_ENTSIZE_BYNAME(name->len, valuelen);

		if ((error = xfs_bmap_add_attrfork(dp, sf_size, rsvd)))
			return(error);
	}

	/*
	 * Fill in the arg structure for this request.
	 */
	memset((char *)&args, 0, sizeof(args));
	args.name = name->name;
	args.namelen = name->len;
	args.value = value;
	args.valuelen = valuelen;
	args.flags = flags;
	args.hashval = xfs_da_hashname(args.name, args.namelen);
	args.dp = dp;
	args.firstblock = &firstblock;
	args.flist = &flist;
	args.whichfork = XFS_ATTR_FORK;
	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;

	/* Size is now blocks for attribute data */
	args.total = xfs_attr_calc_size(dp, name->len, valuelen, &local);

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call. All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET);

	/*
	 * Root fork attributes can use reserved data blocks for this
	 * operation if necessary
	 */
	if (rsvd)
		args.trans->t_flags |= XFS_TRANS_RESERVE;

	if ((error = xfs_trans_reserve(args.trans, args.total,
			XFS_ATTRSET_LOG_RES(mp, args.total), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) {
		xfs_trans_cancel(args.trans, 0);
		return(error);
	}
	xfs_ilock(dp, XFS_ILOCK_EXCL);

	/* Reserve the blocks against the quota before using them. */
	error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
				rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
				       XFS_QMOPT_RES_REGBLKS);
	if (error) {
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
		xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
		return (error);
	}

	xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
	xfs_trans_ihold(args.trans, dp);

	/*
	 * If the attribute list is non-existent or a shortform list,
	 * upgrade it to a single-leaf-block attribute list.
	 */
	if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
	    ((dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) &&
	     (dp->i_d.di_anextents == 0))) {

		/*
		 * Build initial attribute list (if required).
		 */
		if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
			xfs_attr_shortform_create(&args);

		/*
		 * Try to add the attr to the attribute list in
		 * the inode.
		 */
		error = xfs_attr_shortform_addname(&args);
		if (error != ENOSPC) {
			/*
			 * Commit the shortform mods, and we're done.
			 * NOTE: this is also the error path (EEXIST, etc).
			 */
			ASSERT(args.trans != NULL);

			/*
			 * If this is a synchronous mount, make sure that
			 * the transaction goes to disk before returning
			 * to the user.
			 */
			if (mp->m_flags & XFS_MOUNT_WSYNC) {
				xfs_trans_set_sync(args.trans);
			}
			err2 = xfs_trans_commit(args.trans,
						XFS_TRANS_RELEASE_LOG_RES);
			xfs_iunlock(dp, XFS_ILOCK_EXCL);

			/*
			 * Hit the inode change time.
			 */
			if (!error && (flags & ATTR_KERNOTIME) == 0) {
				xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
			}
			return(error == 0 ? err2 : error);
		}

		/*
		 * It won't fit in the shortform, transform to a leaf block.
		 * GROT: another possible req'mt for a double-split btree op.
		 */
		xfs_bmap_init(args.flist, args.firstblock);
		error = xfs_attr_shortform_to_leaf(&args);
		if (!error) {
			error = xfs_bmap_finish(&args.trans, args.flist,
						&committed);
		}
		if (error) {
			ASSERT(committed);
			args.trans = NULL;
			xfs_bmap_cancel(&flist);
			goto out;
		}

		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one.  We need the inode to be in all transactions.
		 */
		if (committed) {
			xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(args.trans, dp);
		}

		/*
		 * Commit the leaf transformation.  We'll need another (linked)
		 * transaction to add the new attribute to the leaf.
		 */
		error = xfs_trans_roll(&args.trans, dp);
		if (error)
			goto out;

	}

	/* Single leaf block or full node format? */
	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_addname(&args);
	} else {
		error = xfs_attr_node_addname(&args);
	}
	if (error) {
		goto out;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC) {
		xfs_trans_set_sync(args.trans);
	}

	/*
	 * Commit the last in the sequence of transactions.
	 */
	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
	error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);

	/*
	 * Hit the inode change time.
	 */
	if (!error && (flags & ATTR_KERNOTIME) == 0) {
		xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
	}

	return(error);

out:
	if (args.trans)
		xfs_trans_cancel(args.trans,
			XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return(error);
}
/*
 * External interface: set attribute @name = @value on @dp after
 * validating the name; the real work happens in xfs_attr_set_int().
 */
int
xfs_attr_set(
	xfs_inode_t	*dp,
	const char	*name,
	char		*value,
	int		valuelen,
	int		flags)
{
	struct xfs_name	xname;
	int		error;

	XFS_STATS_INC(xs_attr_set);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return EIO;

	error = xfs_attr_name_to_xname(&xname, name);
	if (error)
		return error;

	return xfs_attr_set_int(dp, &xname, value, valuelen, flags);
}
/*
* Generic handler routine to remove a name from an attribute list.
* Transitions attribute list from Btree to shortform as necessary.
*/
STATIC int
xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
{
	xfs_da_args_t	args;
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t	flist;
	int		error;
	xfs_mount_t	*mp = dp->i_mount;

	/*
	 * Fill in the arg structure for this request.
	 */
	memset((char *)&args, 0, sizeof(args));
	args.name = name->name;
	args.namelen = name->len;
	args.flags = flags;
	args.hashval = xfs_da_hashname(args.name, args.namelen);
	args.dp = dp;
	args.firstblock = &firstblock;
	args.flist = &flist;
	args.total = 0;
	args.whichfork = XFS_ATTR_FORK;

	/*
	 * Attach the dquots to the inode.
	 */
	error = xfs_qm_dqattach(dp, 0);
	if (error)
		return error;

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call. All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);

	/*
	 * Root fork attributes can use reserved data blocks for this
	 * operation if necessary
	 */
	if (flags & ATTR_ROOT)
		args.trans->t_flags |= XFS_TRANS_RESERVE;

	if ((error = xfs_trans_reserve(args.trans,
				      XFS_ATTRRM_SPACE_RES(mp),
				      XFS_ATTRRM_LOG_RES(mp),
				      0, XFS_TRANS_PERM_LOG_RES,
				      XFS_ATTRRM_LOG_COUNT))) {
		/* Reservation failed: transaction is still clean, cancel it. */
		xfs_trans_cancel(args.trans, 0);
		return(error);
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL);
	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks not allocate in the common case.
	 */
	xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
	xfs_trans_ihold(args.trans, dp);

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp)) {
		/* No attribute fork contents at all: nothing to remove. */
		error = XFS_ERROR(ENOATTR);
		goto out;
	}
	if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		/* Attributes live inline in the inode (shortform). */
		ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
		error = xfs_attr_shortform_remove(&args);
		if (error) {
			goto out;
		}
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		/* Exactly one leaf block in the attr fork. */
		error = xfs_attr_leaf_removename(&args);
	} else {
		/* Full Btree-format attribute fork. */
		error = xfs_attr_node_removename(&args);
	}
	if (error) {
		goto out;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC) {
		xfs_trans_set_sync(args.trans);
	}

	/*
	 * Commit the last in the sequence of transactions.
	 */
	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
	error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);

	/*
	 * Hit the inode change time.
	 */
	if (!error && (flags & ATTR_KERNOTIME) == 0) {
		xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
	}

	return(error);

out:
	/* Error path: abort the dirty transaction (if any) and unlock. */
	if (args.trans)
		xfs_trans_cancel(args.trans,
			XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return(error);
}
int
xfs_attr_remove(
	xfs_inode_t	*dp,
	const char	*name,
	int		flags)
{
	struct xfs_name	xname;
	int		error;

	XFS_STATS_INC(xs_attr_remove);

	/* Refuse new work once the filesystem has been shut down. */
	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return EIO;

	error = xfs_attr_name_to_xname(&xname, name);
	if (error)
		return error;

	/*
	 * Cheap pre-check under the shared lock: if the inode has no
	 * attributes at all we can bail out without starting the
	 * transaction machinery in xfs_attr_remove_int().
	 */
	xfs_ilock(dp, XFS_ILOCK_SHARED);
	if (!xfs_inode_hasattr(dp)) {
		xfs_iunlock(dp, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOATTR);
	}
	xfs_iunlock(dp, XFS_ILOCK_SHARED);

	return xfs_attr_remove_int(dp, &xname, flags);
}
int
xfs_attr_list_int(xfs_attr_list_context_t *context)
{
	int		error;
	xfs_inode_t	*dp = context->dp;

	XFS_STATS_INC(xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return EIO;

	/* Listing only reads the attribute fork: a shared lock suffices. */
	xfs_ilock(dp, XFS_ILOCK_SHARED);
	xfs_attr_trace_l_c("syscall start", context);

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp)) {
		/* No attributes: report success with an empty list. */
		error = 0;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_list(context);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_list(context);
	} else {
		error = xfs_attr_node_list(context);
	}

	xfs_iunlock(dp, XFS_ILOCK_SHARED);
	xfs_attr_trace_l_c("syscall end", context);

	return error;
}
/*
 * Minimum number of bytes an attrlist entry occupies: everything up to
 * the start of its name string.  Expressed with offsetof() instead of
 * the traditional null-pointer subtraction, which is undefined behavior
 * in C even though it happened to work on the targeted compilers.
 */
#define ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	offsetof(struct attrlist_ent, a_name)

/*
 * Actual bytes one entry occupies: base + name + NUL terminator,
 * rounded up to a u_int32_t boundary so the entries stay aligned.
 */
#define ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
	 & ~(sizeof(u_int32_t)-1))
/*
* Format an attribute and copy it out to the user's buffer.
* Take care to check values and protect against them changing later,
* we may be reading them directly out of a user buffer.
*/
/*ARGSUSED*/
STATIC int
xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
		     char *name, int namelen,
		     int valuelen, char *value)
{
	struct attrlist *alist = (struct attrlist *)context->alist;
	attrlist_ent_t *aep;
	int arraytop;

	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return 0;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return 0;

	/*
	 * The al_offset[] array grows up from the front of the buffer
	 * while the entries themselves grow down from the back (firstu);
	 * the buffer is full when the two regions would collide.
	 */
	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		xfs_attr_trace_l_c("buffer full", context);
		/* Tell the caller more entries remain for a later call. */
		alist->al_more = 1;
		context->seen_enough = 1;
		return 1;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	/* NUL-terminate: the source name is not necessarily terminated. */
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	xfs_attr_trace_l_c("add", context);
	return 0;
}
/*
* Generate a list of extended attribute names and optionally
* also value lengths. Positive return value follows the XFS
* convention of being an error, zero or negative return code
* is the length of the buffer returned (negated), indicating
* success.
*/
int
xfs_attr_list(
	xfs_inode_t		*dp,
	char			*buffer,
	int			bufsize,
	int			flags,
	attrlist_cursor_kern_t	*cursor)
{
	xfs_attr_list_context_t context;
	struct attrlist		*alist;
	int			error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return(XFS_ERROR(EINVAL));
	/* A fresh cursor must not carry stale position information. */
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return XFS_ERROR(EINVAL);

	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return XFS_ERROR(EFAULT);
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;

	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	/*
	 * NOTE(review): the attrlist header below is written unconditionally,
	 * so callers must supply a buffer of at least sizeof(struct attrlist)
	 * bytes even when ATTR_KERNOVAL forced bufsize to 0 -- confirm this
	 * holds for every caller.
	 */
	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);

	ASSERT(error >= 0);
	return error;
}
/*
 * Tear down the entire attribute fork of an inode that is being
 * deactivated, freeing all of its blocks.
 */
int /* error */
xfs_attr_inactive(xfs_inode_t *dp)
{
	xfs_trans_t *trans;
	xfs_mount_t *mp;
	int error;

	mp = dp->i_mount;
	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));

	/*
	 * Quick check under the shared lock: inline (shortform) attrs
	 * occupy no extra blocks, so there is nothing to free.
	 */
	xfs_ilock(dp, XFS_ILOCK_SHARED);
	if (!xfs_inode_hasattr(dp) ||
	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		xfs_iunlock(dp, XFS_ILOCK_SHARED);
		return 0;
	}
	xfs_iunlock(dp, XFS_ILOCK_SHARED);

	/*
	 * Start our first transaction of the day.
	 *
	 * All future transactions during this code must be "chained" off
	 * this one via the trans_dup() call. All transactions will contain
	 * the inode, and the inode will always be marked with trans_ihold().
	 * Since the inode will be locked in all transactions, we must log
	 * the inode in every transaction to let it float upward through
	 * the log.
	 */
	trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
	if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_ATTRINVAL_LOG_COUNT))) {
		xfs_trans_cancel(trans, 0);
		return(error);
	}
	xfs_ilock(dp, XFS_ILOCK_EXCL);

	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks, not allocate, in the common case.
	 */
	xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
	xfs_trans_ihold(trans, dp);

	/*
	 * Decide on what work routines to call based on the inode size.
	 * Re-checked here because the fork may have changed while the
	 * inode was unlocked above.
	 */
	if (!xfs_inode_hasattr(dp) ||
	    dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = 0;
		goto out;
	}
	error = xfs_attr_root_inactive(&trans, dp);
	if (error)
		goto out;
	/*
	 * signal synchronous inactive transactions unless this
	 * is a synchronous mount filesystem in which case we
	 * know that we're here because we've been called out of
	 * xfs_inactive which means that the last reference is gone
	 * and the unlink transaction has already hit the disk so
	 * async inactive transactions are safe.
	 */
	if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK,
				(!(mp->m_flags & XFS_MOUNT_WSYNC)
				 ? 1 : 0))))
		goto out;

	/*
	 * Commit the last in the sequence of transactions.
	 */
	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
	error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);

	return(error);

out:
	/* Error path: abort the permanent-log transaction and unlock. */
	xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return(error);
}
/*========================================================================
* External routines when attribute list is inside the inode
*========================================================================*/
/*
* Add a name to the shortform attribute list structure
* This is the external routine.
*/
/*
 * Add a name to the shortform attribute list structure
 * This is the external routine.
 */
STATIC int
xfs_attr_shortform_addname(xfs_da_args_t *args)
{
	int	retval, total, forkoff;

	retval = xfs_attr_shortform_lookup(args);
	if (retval == ENOATTR && (args->flags & ATTR_REPLACE))
		return retval;		/* replace requires an existing attr */
	if (retval == EEXIST) {
		if (args->flags & ATTR_CREATE)
			return retval;	/* pure create must not overwrite */
		/* Replace semantics: drop the old entry, then re-add. */
		retval = xfs_attr_shortform_remove(args);
		ASSERT(retval == 0);
	}

	/* Individual name/value components must fit a shortform entry. */
	if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
	    args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX)
		return XFS_ERROR(ENOSPC);

	/* The grown list must still fit inside the inode's attr fork. */
	total = XFS_ATTR_SF_TOTSIZE(args->dp) +
		XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
	forkoff = xfs_attr_shortform_bytesfit(args->dp, total);
	if (!forkoff)
		return XFS_ERROR(ENOSPC);

	xfs_attr_shortform_add(args, forkoff);
	return 0;
}
/*========================================================================
* External routines when attribute list is one block
*========================================================================*/
/*
* Add a name to the leaf attribute list structure
*
* This leaf block cannot have a "remote" value, we only call this routine
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
xfs_attr_leaf_addname(xfs_da_args_t *args)
{
	xfs_inode_t *dp;
	xfs_dabuf_t *bp;
	int retval, error, committed, forkoff;

	/*
	 * Read the (only) block in the attribute list in.
	 */
	dp = args->dp;
	args->blkno = 0;
	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
				XFS_ATTR_FORK);
	if (error)
		return(error);
	ASSERT(bp != NULL);

	/*
	 * Look up the given attribute in the leaf block. Figure out if
	 * the given flags produce an error or call for an atomic rename.
	 */
	retval = xfs_attr_leaf_lookup_int(bp, args);
	if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
		/* Replace requested but the attr does not exist. */
		xfs_da_brelse(args->trans, bp);
		return(retval);
	} else if (retval == EEXIST) {
		if (args->flags & ATTR_CREATE) {	/* pure create op */
			xfs_da_brelse(args->trans, bp);
			return(retval);
		}
		/*
		 * Existing attr without ATTR_CREATE: do an atomic rename.
		 * Stash the "old" entry's location in the *2 fields so it
		 * can be removed once the "new" entry is in place.
		 */
		args->op_flags |= XFS_DA_OP_RENAME;	/* an atomic rename */
		args->blkno2 = args->blkno;		/* set 2nd entry info*/
		args->index2 = args->index;
		args->rmtblkno2 = args->rmtblkno;
		args->rmtblkcnt2 = args->rmtblkcnt;
	}

	/*
	 * Add the attribute to the leaf block, transitioning to a Btree
	 * if required.
	 */
	retval = xfs_attr_leaf_add(bp, args);
	xfs_da_buf_done(bp);
	if (retval == ENOSPC) {
		/*
		 * Promote the attribute list to the Btree format, then
		 * Commit that transaction so that the node_addname() call
		 * can manage its own transactions.
		 */
		xfs_bmap_init(args->flist, args->firstblock);
		error = xfs_attr_leaf_to_node(args);
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			/* Caller must not touch the dead transaction. */
			ASSERT(committed);
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			return(error);
		}

		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one.  We need the inode to be in all transactions.
		 */
		if (committed) {
			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(args->trans, dp);
		}

		/*
		 * Commit the current trans (including the inode) and start
		 * a new one.
		 */
		error = xfs_trans_roll(&args->trans, dp);
		if (error)
			return (error);

		/*
		 * Fob the whole rest of the problem off on the Btree code.
		 */
		error = xfs_attr_node_addname(args);
		return(error);
	}

	/*
	 * Commit the transaction that added the attr name so that
	 * later routines can manage their own transactions.
	 */
	error = xfs_trans_roll(&args->trans, dp);
	if (error)
		return (error);

	/*
	 * If there was an out-of-line value, allocate the blocks we
	 * identified for its storage and copy the value.  This is done
	 * after we create the attribute so that we don't overflow the
	 * maximum size of a transaction and/or hit a deadlock.
	 */
	if (args->rmtblkno > 0) {
		error = xfs_attr_rmtval_set(args);
		if (error)
			return(error);
	}

	/*
	 * If this is an atomic rename operation, we must "flip" the
	 * incomplete flags on the "new" and "old" attribute/value pairs
	 * so that one disappears and one appears atomically.  Then we
	 * must remove the "old" attribute/value pair.
	 */
	if (args->op_flags & XFS_DA_OP_RENAME) {
		/*
		 * In a separate transaction, set the incomplete flag on the
		 * "old" attr and clear the incomplete flag on the "new" attr.
		 */
		error = xfs_attr_leaf_flipflags(args);
		if (error)
			return(error);

		/*
		 * Dismantle the "old" attribute/value pair by removing
		 * a "remote" value (if it exists).
		 */
		args->index = args->index2;
		args->blkno = args->blkno2;
		args->rmtblkno = args->rmtblkno2;
		args->rmtblkcnt = args->rmtblkcnt2;
		if (args->rmtblkno) {
			error = xfs_attr_rmtval_remove(args);
			if (error)
				return(error);
		}

		/*
		 * Read in the block containing the "old" attr, then
		 * remove the "old" attr from that block (neat, huh!)
		 */
		error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1,
					&bp, XFS_ATTR_FORK);
		if (error)
			return(error);
		ASSERT(bp != NULL);
		(void)xfs_attr_leaf_remove(bp, args);

		/*
		 * If the result is small enough, shrink it all into the inode.
		 */
		if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
			xfs_bmap_init(args->flist, args->firstblock);
			error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
			/* bp is gone due to xfs_da_shrink_inode */
			if (!error) {
				error = xfs_bmap_finish(&args->trans,
							args->flist,
							&committed);
			}
			if (error) {
				ASSERT(committed);
				args->trans = NULL;
				xfs_bmap_cancel(args->flist);
				return(error);
			}

			/*
			 * bmap_finish() may have committed the last trans
			 * and started a new one.  We need the inode to be
			 * in all transactions.
			 */
			if (committed) {
				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
				xfs_trans_ihold(args->trans, dp);
			}
		} else
			xfs_da_buf_done(bp);

		/*
		 * Commit the remove and start the next trans in series.
		 */
		error = xfs_trans_roll(&args->trans, dp);

	} else if (args->rmtblkno > 0) {
		/*
		 * Added a "remote" value, just clear the incomplete flag.
		 */
		error = xfs_attr_leaf_clearflag(args);
	}
	return(error);
}
/*
* Remove a name from the leaf attribute list structure
*
* This leaf block cannot have a "remote" value, we only call this routine
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
xfs_attr_leaf_removename(xfs_da_args_t *args)
{
	xfs_inode_t *dp;
	xfs_dabuf_t *bp;
	int error, committed, forkoff;

	/*
	 * Remove the attribute.
	 */
	dp = args->dp;
	args->blkno = 0;
	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
				XFS_ATTR_FORK);
	if (error) {
		return(error);
	}
	ASSERT(bp != NULL);

	error = xfs_attr_leaf_lookup_int(bp, args);
	if (error == ENOATTR) {
		/* Name not present in the leaf: nothing to remove. */
		xfs_da_brelse(args->trans, bp);
		return(error);
	}

	(void)xfs_attr_leaf_remove(bp, args);

	/*
	 * If the result is small enough, shrink it all into the inode.
	 */
	if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
		xfs_bmap_init(args->flist, args->firstblock);
		error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
		/* bp is gone due to xfs_da_shrink_inode */
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			/* Caller must not touch the dead transaction. */
			ASSERT(committed);
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			return(error);
		}

		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one.  We need the inode to be in all transactions.
		 */
		if (committed) {
			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(args->trans, dp);
		}
	} else
		xfs_da_buf_done(bp);
	return(0);
}
/*
* Look up a name in a leaf attribute list structure.
*
* This leaf block cannot have a "remote" value, we only call this routine
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
/*
 * Look up a name in a leaf attribute list structure.
 *
 * This leaf block cannot have a "remote" value, we only call this routine
 * if bmap_one_block() says there is only one block (ie: no remote blks).
 */
STATIC int
xfs_attr_leaf_get(xfs_da_args_t *args)
{
	xfs_dabuf_t	*bp;
	int		error;

	/* The single leaf always lives at block 0 of the attr fork. */
	args->blkno = 0;
	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
				XFS_ATTR_FORK);
	if (error)
		return error;
	ASSERT(bp != NULL);

	error = xfs_attr_leaf_lookup_int(bp, args);
	if (error != EEXIST) {
		xfs_da_brelse(args->trans, bp);
		return error;
	}

	error = xfs_attr_leaf_getvalue(bp, args);
	xfs_da_brelse(args->trans, bp);

	/* Pull in an out-of-line value unless the caller only wants sizes. */
	if (error == 0 && args->rmtblkno > 0 &&
	    !(args->flags & ATTR_KERNOVAL))
		error = xfs_attr_rmtval_get(args);

	return error;
}
/*
* Copy out attribute entries for attr_list(), for leaf attribute lists.
*/
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	xfs_attr_leafblock_t *leaf;
	int error;
	xfs_dabuf_t *bp;

	/* The single leaf always lives at block 0 of the attr fork. */
	context->cursor->blkno = 0;
	error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK);
	if (error)
		return XFS_ERROR(error);
	ASSERT(bp != NULL);

	leaf = bp->data;
	if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) {
		/* Bad on-disk magic: report corruption and bail out. */
		XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
				     context->dp->i_mount, leaf);
		xfs_da_brelse(NULL, bp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	error = xfs_attr_leaf_list_int(bp, context);
	xfs_da_brelse(NULL, bp);
	return XFS_ERROR(error);
}
/*========================================================================
* External routines when attribute list size > XFS_LBSIZE(mp).
*========================================================================*/
/*
* Add a name to a Btree-format attribute list.
*
* This will involve walking down the Btree, and may involve splitting
* leaf nodes and even splitting intermediate nodes up to and including
* the root node (a special case of an intermediate node).
*
* "Remote" attribute values confuse the issue and atomic rename operations
* add a whole extra layer of confusion on top of that.
*/
STATIC int
xfs_attr_node_addname(xfs_da_args_t *args)
{
	xfs_da_state_t *state;
	xfs_da_state_blk_t *blk;
	xfs_inode_t *dp;
	xfs_mount_t *mp;
	int committed, retval, error;

	/*
	 * Fill in bucket of arguments/results/context to carry around.
	 */
	dp = args->dp;
	mp = dp->i_mount;
restart:
	state = xfs_da_state_alloc();
	state->args = args;
	state->mp = mp;
	state->blocksize = state->mp->m_sb.sb_blocksize;
	state->node_ents = state->mp->m_attr_node_ents;

	/*
	 * Search to see if name already exists, and get back a pointer
	 * to where it should go.
	 */
	error = xfs_da_node_lookup_int(state, &retval);
	if (error)
		goto out;
	blk = &state->path.blk[ state->path.active-1 ];
	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
	if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
		/* Replace requested but the attr does not exist. */
		goto out;
	} else if (retval == EEXIST) {
		if (args->flags & ATTR_CREATE)
			goto out;
		/*
		 * Existing attr without ATTR_CREATE: atomic rename.
		 * Save the "old" entry's location in the *2 fields and
		 * reset the remote fields for the "new" entry.
		 */
		args->op_flags |= XFS_DA_OP_RENAME;	/* atomic rename op */
		args->blkno2 = args->blkno;		/* set 2nd entry info*/
		args->index2 = args->index;
		args->rmtblkno2 = args->rmtblkno;
		args->rmtblkcnt2 = args->rmtblkcnt;
		args->rmtblkno = 0;
		args->rmtblkcnt = 0;
	}

	retval = xfs_attr_leaf_add(blk->bp, state->args);
	if (retval == ENOSPC) {
		if (state->path.active == 1) {
			/*
			 * Its really a single leaf node, but it had
			 * out-of-line values so it looked like it *might*
			 * have been a b-tree.
			 */
			xfs_da_state_free(state);
			xfs_bmap_init(args->flist, args->firstblock);
			error = xfs_attr_leaf_to_node(args);
			if (!error) {
				error = xfs_bmap_finish(&args->trans,
							args->flist,
							&committed);
			}
			if (error) {
				/* Caller must not touch dead transaction. */
				ASSERT(committed);
				args->trans = NULL;
				xfs_bmap_cancel(args->flist);
				goto out;
			}

			/*
			 * bmap_finish() may have committed the last trans
			 * and started a new one.  We need the inode to be
			 * in all transactions.
			 */
			if (committed) {
				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
				xfs_trans_ihold(args->trans, dp);
			}

			/*
			 * Commit the node conversion and start the next
			 * trans in the chain.
			 */
			error = xfs_trans_roll(&args->trans, dp);
			if (error)
				goto out;

			/* Retry the whole add against the new node tree. */
			goto restart;
		}

		/*
		 * Split as many Btree elements as required.
		 * This code tracks the new and old attr's location
		 * in the index/blkno/rmtblkno/rmtblkcnt fields and
		 * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
		 */
		xfs_bmap_init(args->flist, args->firstblock);
		error = xfs_da_split(state);
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			ASSERT(committed);
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			goto out;
		}

		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one.  We need the inode to be in all transactions.
		 */
		if (committed) {
			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(args->trans, dp);
		}

	} else {
		/*
		 * Addition succeeded, update Btree hashvals.
		 */
		xfs_da_fixhashpath(state, &state->path);
	}

	/*
	 * Kill the state structure, we're done with it and need to
	 * allow the buffers to come back later.
	 */
	xfs_da_state_free(state);
	state = NULL;

	/*
	 * Commit the leaf addition or btree split and start the next
	 * trans in the chain.
	 */
	error = xfs_trans_roll(&args->trans, dp);
	if (error)
		goto out;

	/*
	 * If there was an out-of-line value, allocate the blocks we
	 * identified for its storage and copy the value.  This is done
	 * after we create the attribute so that we don't overflow the
	 * maximum size of a transaction and/or hit a deadlock.
	 */
	if (args->rmtblkno > 0) {
		error = xfs_attr_rmtval_set(args);
		if (error)
			return(error);
	}

	/*
	 * If this is an atomic rename operation, we must "flip" the
	 * incomplete flags on the "new" and "old" attribute/value pairs
	 * so that one disappears and one appears atomically.  Then we
	 * must remove the "old" attribute/value pair.
	 */
	if (args->op_flags & XFS_DA_OP_RENAME) {
		/*
		 * In a separate transaction, set the incomplete flag on the
		 * "old" attr and clear the incomplete flag on the "new" attr.
		 */
		error = xfs_attr_leaf_flipflags(args);
		if (error)
			goto out;

		/*
		 * Dismantle the "old" attribute/value pair by removing
		 * a "remote" value (if it exists).
		 */
		args->index = args->index2;
		args->blkno = args->blkno2;
		args->rmtblkno = args->rmtblkno2;
		args->rmtblkcnt = args->rmtblkcnt2;
		if (args->rmtblkno) {
			error = xfs_attr_rmtval_remove(args);
			if (error)
				return(error);
		}

		/*
		 * Re-find the "old" attribute entry after any split ops.
		 * The INCOMPLETE flag means that we will find the "old"
		 * attr, not the "new" one.
		 */
		args->flags |= XFS_ATTR_INCOMPLETE;
		state = xfs_da_state_alloc();
		state->args = args;
		state->mp = mp;
		state->blocksize = state->mp->m_sb.sb_blocksize;
		state->node_ents = state->mp->m_attr_node_ents;
		state->inleaf = 0;
		error = xfs_da_node_lookup_int(state, &retval);
		if (error)
			goto out;

		/*
		 * Remove the name and update the hashvals in the tree.
		 */
		blk = &state->path.blk[ state->path.active-1 ];
		ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
		error = xfs_attr_leaf_remove(blk->bp, args);
		xfs_da_fixhashpath(state, &state->path);

		/*
		 * Check to see if the tree needs to be collapsed.
		 */
		if (retval && (state->path.active > 1)) {
			xfs_bmap_init(args->flist, args->firstblock);
			error = xfs_da_join(state);
			if (!error) {
				error = xfs_bmap_finish(&args->trans,
							args->flist,
							&committed);
			}
			if (error) {
				ASSERT(committed);
				args->trans = NULL;
				xfs_bmap_cancel(args->flist);
				goto out;
			}

			/*
			 * bmap_finish() may have committed the last trans
			 * and started a new one.  We need the inode to be
			 * in all transactions.
			 */
			if (committed) {
				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
				xfs_trans_ihold(args->trans, dp);
			}
		}

		/*
		 * Commit and start the next trans in the chain.
		 */
		error = xfs_trans_roll(&args->trans, dp);
		if (error)
			goto out;

	} else if (args->rmtblkno > 0) {
		/*
		 * Added a "remote" value, just clear the incomplete flag.
		 */
		error = xfs_attr_leaf_clearflag(args);
		if (error)
			goto out;
	}
	retval = error = 0;

out:
	if (state)
		xfs_da_state_free(state);
	if (error)
		return(error);
	return(retval);
}
/*
* Remove a name from a B-tree attribute list.
*
* This will involve walking down the Btree, and may involve joining
* leaf nodes and even joining intermediate nodes up to and including
* the root node (a special case of an intermediate node).
*/
STATIC int
xfs_attr_node_removename(xfs_da_args_t *args)
{
	xfs_da_state_t *state;
	xfs_da_state_blk_t *blk;
	xfs_inode_t *dp;
	xfs_dabuf_t *bp;
	int retval, error, committed, forkoff;

	/*
	 * Tie a string around our finger to remind us where we are.
	 */
	dp = args->dp;
	state = xfs_da_state_alloc();
	state->args = args;
	state->mp = dp->i_mount;
	state->blocksize = state->mp->m_sb.sb_blocksize;
	state->node_ents = state->mp->m_attr_node_ents;

	/*
	 * Search to see if name exists, and get back a pointer to it.
	 */
	error = xfs_da_node_lookup_int(state, &retval);
	if (error || (retval != EEXIST)) {
		if (error == 0)
			error = retval;		/* name not found */
		goto out;
	}

	/*
	 * If there is an out-of-line value, de-allocate the blocks.
	 * This is done before we remove the attribute so that we don't
	 * overflow the maximum size of a transaction and/or hit a deadlock.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	ASSERT(blk->bp != NULL);
	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
	if (args->rmtblkno > 0) {
		/*
		 * Fill in disk block numbers in the state structure
		 * so that we can get the buffers back after we commit
		 * several transactions in the following calls.
		 */
		error = xfs_attr_fillstate(state);
		if (error)
			goto out;

		/*
		 * Mark the attribute as INCOMPLETE, then bunmapi() the
		 * remote value.
		 */
		error = xfs_attr_leaf_setflag(args);
		if (error)
			goto out;
		error = xfs_attr_rmtval_remove(args);
		if (error)
			goto out;

		/*
		 * Refill the state structure with buffers, the prior calls
		 * released our buffers.
		 */
		error = xfs_attr_refillstate(state);
		if (error)
			goto out;
	}

	/*
	 * Remove the name and update the hashvals in the tree.
	 * A nonzero retval indicates the leaf is now small enough
	 * that a join should be considered (checked below).
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
	retval = xfs_attr_leaf_remove(blk->bp, args);
	xfs_da_fixhashpath(state, &state->path);

	/*
	 * Check to see if the tree needs to be collapsed.
	 */
	if (retval && (state->path.active > 1)) {
		xfs_bmap_init(args->flist, args->firstblock);
		error = xfs_da_join(state);
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			/* Caller must not touch the dead transaction. */
			ASSERT(committed);
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			goto out;
		}

		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one.  We need the inode to be in all transactions.
		 */
		if (committed) {
			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(args->trans, dp);
		}

		/*
		 * Commit the Btree join operation and start a new trans.
		 */
		error = xfs_trans_roll(&args->trans, dp);
		if (error)
			goto out;
	}

	/*
	 * If the result is small enough, push it all into the inode.
	 */
	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		/*
		 * Have to get rid of the copy of this dabuf in the state.
		 */
		ASSERT(state->path.active == 1);
		ASSERT(state->path.blk[0].bp);
		xfs_da_buf_done(state->path.blk[0].bp);
		state->path.blk[0].bp = NULL;

		error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
					XFS_ATTR_FORK);
		if (error)
			goto out;
		ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *)
				    bp->data)->hdr.info.magic)
		       == XFS_ATTR_LEAF_MAGIC);

		if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
			xfs_bmap_init(args->flist, args->firstblock);
			error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
			/* bp is gone due to xfs_da_shrink_inode */
			if (!error) {
				error = xfs_bmap_finish(&args->trans,
							args->flist,
							&committed);
			}
			if (error) {
				ASSERT(committed);
				args->trans = NULL;
				xfs_bmap_cancel(args->flist);
				goto out;
			}

			/*
			 * bmap_finish() may have committed the last trans
			 * and started a new one.  We need the inode to be
			 * in all transactions.
			 */
			if (committed) {
				xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
				xfs_trans_ihold(args->trans, dp);
			}
		} else
			xfs_da_brelse(args->trans, bp);
	}
	error = 0;

out:
	xfs_da_state_free(state);
	return(error);
}
/*
* Fill in the disk block numbers in the state structure for the buffers
* that are attached to the state structure.
* This is done so that we can quickly reattach ourselves to those buffers
* after some set of transaction commits have released these buffers.
*/
STATIC int
xfs_attr_fillstate(xfs_da_state_t *state)
{
xfs_da_state_path_t *path;
xfs_da_state_blk_t *blk;
int level;
/*
* Roll down the "path" in the state structure, storing the on-disk
* block number for those buffers in the "path".
*/
path = &state->path;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
blk->disk_blkno = xfs_da_blkno(blk->bp);
xfs_da_buf_done(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
}
}
/*
* Roll down the "altpath" in the state structure, storing the on-disk
* block number for those buffers in the "altpath".
*/
path = &state->altpath;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
blk->disk_blkno = xfs_da_blkno(blk->bp);
xfs_da_buf_done(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
}
}
return(0);
}
/*
* Reattach the buffers to the state structure based on the disk block
* numbers stored in the state structure.
* This is done after some set of transaction commits have released those
* buffers from our grip.
*/
STATIC int
xfs_attr_refillstate(xfs_da_state_t *state)
{
xfs_da_state_path_t *path;
xfs_da_state_blk_t *blk;
int level, error;
/*
* Roll down the "path" in the state structure, storing the on-disk
* block number for those buffers in the "path".
*/
path = &state->path;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
error = xfs_da_read_buf(state->args->trans,
state->args->dp,
blk->blkno, blk->disk_blkno,
&blk->bp, XFS_ATTR_FORK);
if (error)
return(error);
} else {
blk->bp = NULL;
}
}
/*
* Roll down the "altpath" in the state structure, storing the on-disk
* block number for those buffers in the "altpath".
*/
path = &state->altpath;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
error = xfs_da_read_buf(state->args->trans,
state->args->dp,
blk->blkno, blk->disk_blkno,
&blk->bp, XFS_ATTR_FORK);
if (error)
return(error);
} else {
blk->bp = NULL;
}
}
return(0);
}
/*
* Look up a filename in a node attribute list.
*
* This routine gets called for any attribute fork that has more than one
* block, ie: both true Btree attr lists and for single-leaf-blocks with
* "remote" values taking up more blocks.
*/
/*
 * Look up a filename in a node attribute list.
 *
 * This routine gets called for any attribute fork that has more than one
 * block, ie: both true Btree attr lists and for single-leaf-blocks with
 * "remote" values taking up more blocks.
 */
STATIC int
xfs_attr_node_get(xfs_da_args_t *args)
{
	xfs_da_state_t		*state;
	xfs_da_state_blk_t	*leaf_blk;
	int			retval, error;
	int			lvl;

	state = xfs_da_state_alloc();
	state->args = args;
	state->mp = args->dp->i_mount;
	state->blocksize = state->mp->m_sb.sb_blocksize;
	state->node_ents = state->mp->m_attr_node_ents;

	/*
	 * Walk down the Btree looking for the requested name.
	 */
	error = xfs_da_node_lookup_int(state, &retval);
	if (error) {
		retval = error;
	} else if (retval == EEXIST) {
		leaf_blk = &state->path.blk[state->path.active - 1];
		ASSERT(leaf_blk->bp != NULL);
		ASSERT(leaf_blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Found it: fetch the value, following a "remote"
		 * pointer unless the caller only wants the length.
		 */
		retval = xfs_attr_leaf_getvalue(leaf_blk->bp, args);
		if (retval == 0 && args->rmtblkno > 0 &&
		    !(args->flags & ATTR_KERNOVAL))
			retval = xfs_attr_rmtval_get(args);
	}

	/*
	 * Release every buffer still held on the lookup path.
	 */
	for (lvl = 0; lvl < state->path.active; lvl++) {
		xfs_da_brelse(args->trans, state->path.blk[lvl].bp);
		state->path.blk[lvl].bp = NULL;
	}

	xfs_da_state_free(state);
	return retval;
}
/*
 * Iterate the attr entries of a multi-block (node-format) attribute fork,
 * filling the caller's list context.  Resumable: the caller's cursor
 * records the leaf block and hashval where the previous call stopped.
 *
 * Three phases:
 *   1) validate the cursor's cached block number (it may be stale),
 *   2) if invalid, descend the da-btree from the root by hashval,
 *   3) walk forward through the leaf-block chain emitting entries.
 */
STATIC int						/* error */
xfs_attr_node_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_leafblock_t *leaf;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int error, i;
	xfs_dabuf_t *bp;

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
					      &bp, XFS_ATTR_FORK);
		/* a corrupt block just invalidates the cursor, not the call */
		if ((error != 0) && (error != EFSCORRUPTED))
			return(error);
		if (bp) {
			node = bp->data;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
				/* cursor points at an interior node: stale */
				xfs_attr_trace_l_cn("wrong blk", context, node);
				xfs_da_brelse(NULL, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
				leaf = bp->data;
				if (cursor->hashval > be32_to_cpu(leaf->entries[
				    be16_to_cpu(leaf->hdr.count)-1].hashval)) {
					/* cursor hash past this leaf's range */
					xfs_attr_trace_l_cl("wrong blk",
							   context, leaf);
					xfs_da_brelse(NULL, bp);
					bp = NULL;
				} else if (cursor->hashval <=
					     be32_to_cpu(leaf->entries[0].hashval)) {
					/* hash may also exist in earlier leaves */
					xfs_attr_trace_l_cl("maybe wrong blk",
							   context, leaf);
					xfs_da_brelse(NULL, bp);
					bp = NULL;
				}
				break;
			default:
				xfs_attr_trace_l_c("wrong blk - ??", context);
				xfs_da_brelse(NULL, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		cursor->blkno = 0;
		for (;;) {
			error = xfs_da_read_buf(NULL, context->dp,
						      cursor->blkno, -1, &bp,
						      XFS_ATTR_FORK);
			if (error)
				return(error);
			if (unlikely(bp == NULL)) {
				XFS_ERROR_REPORT("xfs_attr_node_list(2)",
						 XFS_ERRLEVEL_LOW,
						 context->dp->i_mount);
				return(XFS_ERROR(EFSCORRUPTED));
			}
			node = bp->data;
			if (be16_to_cpu(node->hdr.info.magic)
							== XFS_ATTR_LEAF_MAGIC)
				break;	/* reached the leaf level */
			if (unlikely(be16_to_cpu(node->hdr.info.magic)
							!= XFS_DA_NODE_MAGIC)) {
				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
						     XFS_ERRLEVEL_LOW,
						     context->dp->i_mount,
						     node);
				xfs_da_brelse(NULL, bp);
				return(XFS_ERROR(EFSCORRUPTED));
			}
			/* pick the first child whose hash covers the cursor */
			btree = node->btree;
			for (i = 0; i < be16_to_cpu(node->hdr.count);
								btree++, i++) {
				if (cursor->hashval
						<= be32_to_cpu(btree->hashval)) {
					cursor->blkno = be32_to_cpu(btree->before);
					xfs_attr_trace_l_cb("descending",
							    context, btree);
					break;
				}
			}
			if (i == be16_to_cpu(node->hdr.count)) {
				/* cursor hash beyond all entries: done */
				xfs_da_brelse(NULL, bp);
				return(0);
			}
			xfs_da_brelse(NULL, bp);
		}
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->data;
		if (unlikely(be16_to_cpu(leaf->hdr.info.magic)
						!= XFS_ATTR_LEAF_MAGIC)) {
			XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, leaf);
			xfs_da_brelse(NULL, bp);
			return(XFS_ERROR(EFSCORRUPTED));
		}
		error = xfs_attr_leaf_list_int(bp, context);
		if (error) {
			xfs_da_brelse(NULL, bp);
			return error;
		}
		/* stop at caller's limit or at the last leaf in the chain */
		if (context->seen_enough || leaf->hdr.info.forw == 0)
			break;
		cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
		xfs_da_brelse(NULL, bp);
		error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
					      &bp, XFS_ATTR_FORK);
		if (error)
			return(error);
		if (unlikely((bp == NULL))) {
			XFS_ERROR_REPORT("xfs_attr_node_list(5)",
					 XFS_ERRLEVEL_LOW,
					 context->dp->i_mount);
			return(XFS_ERROR(EFSCORRUPTED));
		}
	}
	xfs_da_brelse(NULL, bp);
	return(0);
}
/*========================================================================
* External routines for manipulating out-of-line attribute values.
*========================================================================*/
/*
 * Read the value associated with an attribute from the out-of-line buffer
 * that we stored it in.
 *
 * Maps the attr fork blocks described by args->rmtblkno/rmtblkcnt and
 * copies up to args->valuelen bytes into args->value.
 */
int
xfs_attr_rmtval_get(xfs_da_args_t *args)
{
	xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE];
	xfs_mount_t *mp;
	xfs_daddr_t dblkno;
	xfs_caddr_t dst;
	xfs_buf_t *bp;
	int nmap, error, tmp, valuelen, blkcnt, i;
	xfs_dablk_t lblkno;

	/* caller wanting only the length must not reach here */
	ASSERT(!(args->flags & ATTR_KERNOVAL));

	mp = args->dp->i_mount;
	dst = args->value;
	valuelen = args->valuelen;
	lblkno = args->rmtblkno;
	while (valuelen > 0) {
		nmap = ATTR_RMTVALUE_MAPSIZE;
		/* read-only mapping of the remote-value extents */
		error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
				  args->rmtblkcnt,
				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
				  NULL, 0, map, &nmap, NULL, NULL);
		if (error)
			return(error);
		ASSERT(nmap >= 1);

		for (i = 0; (i < nmap) && (valuelen > 0); i++) {
			/* value blocks must be real, allocated extents */
			ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
			       (map[i].br_startblock != HOLESTARTBLOCK));
			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
			blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
					     blkcnt,
					     XFS_BUF_LOCK | XBF_DONT_BLOCK,
					     &bp);
			if (error)
				return(error);

			/* last buffer may hold more than we still need */
			tmp = (valuelen < XFS_BUF_SIZE(bp))
				? valuelen : XFS_BUF_SIZE(bp);
			xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
			xfs_buf_relse(bp);
			dst += tmp;
			valuelen -= tmp;
			lblkno += map[i].br_blockcount;
		}
	}
	ASSERT(valuelen == 0);
	return(0);
}
/*
 * Write the value associated with an attribute into the out-of-line buffer
 * that we have defined for it.
 *
 * Two passes: first allocate the required blocks in the attr fork,
 * rolling the transaction between allocations; then copy the value into
 * those blocks with synchronous writes so the data is known to be on
 * disk before the INCOMPLETE flag is turned off.
 */
STATIC int
xfs_attr_rmtval_set(xfs_da_args_t *args)
{
	xfs_mount_t *mp;
	xfs_fileoff_t lfileoff;
	xfs_inode_t *dp;
	xfs_bmbt_irec_t map;
	xfs_daddr_t dblkno;
	xfs_caddr_t src;
	xfs_buf_t *bp;
	xfs_dablk_t lblkno;
	int blkcnt, valuelen, nmap, error, tmp, committed;

	dp = args->dp;
	mp = dp->i_mount;
	src = args->value;

	/*
	 * Find a "hole" in the attribute address space large enough for
	 * us to drop the new attribute's value into.
	 */
	blkcnt = XFS_B_TO_FSB(mp, args->valuelen);
	lfileoff = 0;
	error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
						   XFS_ATTR_FORK);
	if (error) {
		return(error);
	}
	args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
	args->rmtblkcnt = blkcnt;

	/*
	 * Roll through the "value", allocating blocks on disk as required.
	 */
	while (blkcnt > 0) {
		/*
		 * Allocate a single extent, up to the size of the value.
		 */
		xfs_bmap_init(args->flist, args->firstblock);
		nmap = 1;
		error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno,
				  blkcnt,
				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
				  XFS_BMAPI_WRITE,
				  args->firstblock, args->total, &map, &nmap,
				  args->flist, NULL);
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			/*
			 * NOTE(review): if xfs_bmapi() itself failed,
			 * "committed" was never written and this ASSERT
			 * reads an uninitialized value — present in the
			 * original code as well; flagged, not changed,
			 * since the recovery path depends on it.
			 */
			ASSERT(committed);
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			return(error);
		}

		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one.  We need the inode to be in all transactions.
		 */
		if (committed) {
			xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(args->trans, dp);
		}

		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));
		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;

		/*
		 * Start the next trans in the chain.
		 */
		error = xfs_trans_roll(&args->trans, dp);
		if (error)
			return (error);
	}

	/*
	 * Roll through the "value", copying the attribute value to the
	 * already-allocated blocks.  Blocks are written synchronously
	 * so that we can know they are all on disk before we turn off
	 * the INCOMPLETE flag.
	 */
	lblkno = args->rmtblkno;
	valuelen = args->valuelen;
	while (valuelen > 0) {
		/*
		 * Try to remember where we decided to put the value.
		 */
		xfs_bmap_init(args->flist, args->firstblock);
		nmap = 1;
		error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno,
				  args->rmtblkcnt,
				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
				  args->firstblock, 0, &map, &nmap,
				  NULL, NULL);
		if (error) {
			return(error);
		}
		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));

		/*
		 * Fixed: the original terminated this statement with a
		 * comma (accidental comma operator) instead of a
		 * semicolon.  Harmless at runtime but a latent trap.
		 */
		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);

		bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, blkcnt,
				       XFS_BUF_LOCK | XBF_DONT_BLOCK);
		ASSERT(bp);
		ASSERT(!XFS_BUF_GETERROR(bp));

		tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
							XFS_BUF_SIZE(bp);
		xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
		/* zero the tail of the last block so stale data never leaks */
		if (tmp < XFS_BUF_SIZE(bp))
			xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
		if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
			return (error);
		}
		src += tmp;
		valuelen -= tmp;
		lblkno += map.br_blockcount;
	}
	ASSERT(valuelen == 0);
	return(0);
}
/*
 * Remove the value associated with an attribute by deleting the
 * out-of-line buffer that it is stored on.
 *
 * First invalidates any cached buffers covering the remote-value blocks,
 * then repeatedly unmaps the extents, rolling the transaction between
 * unmaps until the whole region is freed.
 */
STATIC int
xfs_attr_rmtval_remove(xfs_da_args_t *args)
{
	xfs_mount_t *mp;
	xfs_bmbt_irec_t map;
	xfs_buf_t *bp;
	xfs_daddr_t dblkno;
	xfs_dablk_t lblkno;
	int valuelen, blkcnt, nmap, error, done, committed;

	mp = args->dp->i_mount;

	/*
	 * Roll through the "value", invalidating the attribute value's
	 * blocks.
	 */
	lblkno = args->rmtblkno;
	valuelen = args->rmtblkcnt;
	while (valuelen > 0) {
		/*
		 * Try to remember where we decided to put the value.
		 */
		xfs_bmap_init(args->flist, args->firstblock);
		nmap = 1;
		error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno,
					args->rmtblkcnt,
					XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
					args->firstblock, 0, &map, &nmap,
					args->flist, NULL);
		if (error) {
			return(error);
		}
		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));

		/*
		 * Fixed: the original terminated this statement with a
		 * comma (accidental comma operator) instead of a
		 * semicolon.  Harmless at runtime but a latent trap.
		 */
		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);

		/*
		 * If the "remote" value is in the cache, remove it.
		 */
		bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
				XFS_INCORE_TRYLOCK);
		if (bp) {
			XFS_BUF_STALE(bp);
			XFS_BUF_UNDELAYWRITE(bp);
			xfs_buf_relse(bp);
			bp = NULL;
		}

		valuelen -= map.br_blockcount;
		lblkno += map.br_blockcount;
	}

	/*
	 * Keep de-allocating extents until the remote-value region is gone.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	done = 0;
	while (!done) {
		xfs_bmap_init(args->flist, args->firstblock);
		error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
				    XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
				    1, args->firstblock, args->flist,
				    NULL, &done);
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			/*
			 * NOTE(review): if xfs_bunmapi() itself failed,
			 * "committed" is uninitialized here — present in
			 * the original as well; flagged, not changed.
			 */
			ASSERT(committed);
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			return(error);
		}

		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one.  We need the inode to be in all transactions.
		 */
		if (committed) {
			xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL);
			xfs_trans_ihold(args->trans, args->dp);
		}

		/*
		 * Close out trans and start the next one in the chain.
		 */
		error = xfs_trans_roll(&args->trans, args->dp);
		if (error)
			return (error);
	}
	return(0);
}
#if defined(XFS_ATTR_TRACE)
/*
* Add a trace buffer entry for an attr_list context structure.
*/
void
xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
{
xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context,
(__psunsigned_t)NULL,
(__psunsigned_t)NULL,
(__psunsigned_t)NULL);
}
/*
* Add a trace buffer entry for a context structure and a Btree node.
*/
void
xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
struct xfs_da_intnode *node)
{
xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context,
(__psunsigned_t)be16_to_cpu(node->hdr.count),
(__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
(__psunsigned_t)be32_to_cpu(node->btree[
be16_to_cpu(node->hdr.count)-1].hashval));
}
/*
* Add a trace buffer entry for a context structure and a Btree element.
*/
void
xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
struct xfs_da_node_entry *btree)
{
xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context,
(__psunsigned_t)be32_to_cpu(btree->hashval),
(__psunsigned_t)be32_to_cpu(btree->before),
(__psunsigned_t)NULL);
}
/*
* Add a trace buffer entry for a context structure and a leaf block.
*/
void
xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
struct xfs_attr_leafblock *leaf)
{
xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context,
(__psunsigned_t)be16_to_cpu(leaf->hdr.count),
(__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
(__psunsigned_t)be32_to_cpu(leaf->entries[
be16_to_cpu(leaf->hdr.count)-1].hashval));
}
/*
 * Add a trace buffer entry for the arguments given to the routine,
 * generic form.
 *
 * The ktrace record is positional: slots 1-2 are the event type and
 * call site, slots 3-10 and 12-13 snapshot the list context (cursor,
 * output buffer state, flags), slot 11 is unused, and slots 14-16
 * carry the caller-specific payload (a13..a15).
 */
void
xfs_attr_trace_enter(int type, char *where,
			 struct xfs_attr_list_context *context,
			 __psunsigned_t a13, __psunsigned_t a14,
			 __psunsigned_t a15)
{
	ASSERT(xfs_attr_trace_buf);
	ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
		(void *)((__psunsigned_t)where),
		(void *)((__psunsigned_t)context->dp),
		(void *)((__psunsigned_t)context->cursor->hashval),
		(void *)((__psunsigned_t)context->cursor->blkno),
		(void *)((__psunsigned_t)context->cursor->offset),
		(void *)((__psunsigned_t)context->alist),
		(void *)((__psunsigned_t)context->bufsize),
		(void *)((__psunsigned_t)context->count),
		(void *)((__psunsigned_t)context->firstu),
		NULL,
		(void *)((__psunsigned_t)context->dupcnt),
		(void *)((__psunsigned_t)context->flags),
		(void *)a13, (void *)a14, (void *)a15);
}
#endif /* XFS_ATTR_TRACE */
| gpl-2.0 |
lollipop-og/android_kernel_geehrc | drivers/gpu/msm/adreno_a4xx.c | 744 | 10314 | /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "adreno.h"
#include "a4xx_reg.h"
#include "adreno_a3xx.h"
/*
 * Set of registers to dump for A4XX on postmortem and snapshot.
 * Registers in pairs - first value is the start offset, second
 * is the stop offset (inclusive)
 *
 * Note: the array length must stay even (consumed as range pairs;
 * see a4xx_registers_count below, which divides by 2).
 */
const unsigned int a4xx_registers[] = {
	0x0000, 0x0002, /* RBBM_HW_VERSION - RBBM_HW_CONFIGURATION */
	0x0020, 0x0020, /* RBBM_CLOCK_CTL */
	0x0021, 0x0021, /* RBBM_SP_HYST_CNT */
	0x0023, 0x0024, /* RBBM_AHB_CTL0 - RBBM_AHB_CTL1 */
	0x0026, 0x0026, /* RBBM_RB_SUB_BLOCK_SEL_CTL */
	0x0028, 0x0034, /* RBBM_RAM_ACC_63_32 - RBBM_INTERFACE_HANG_MASK_CTL4 */
	0x0037, 0x003f, /* RBBM_INT_0_MASK - RBBM_AHB_DEBUG_CTL */
	0x0041, 0x0045, /* RBBM_VBIF_DEBUG_CTL - BLOCK_SW_RESET_CMD */
	0x0047, 0x0049, /* RBBM_RESET_CYCLES - RBBM_EXT_TRACE_BUS_CTL */
	0x009c, 0x0170, /* RBBM_PERFCTR_CP_0_LO - RBBM_PERFCTR_CTL */
	0x0174, 0x0182, /* RBBM_PERFCTR_LOAD_VALUE_LO - RBBM_CLOCK_STATUS */
	0x0189, 0x019f, /* RBBM_AHB_STATUS - RBBM_INTERFACE_RRDY_STATUS5 */
	0x0206, 0x0217, /* CP_IB1_BASE - CP_ME_RB_DONE_DATA */
	0x0219, 0x0219, /* CP_QUEUE_THRESH2 */
	0x021b, 0x021b, /* CP_MERCIU_SIZE */
	0x0228, 0x0229, /* CP_SCRATCH_UMASK - CP_SCRATCH_ADDR */
	0x022a, 0x022c, /* CP_PREEMPT - CP_CNTL */
	0x022e, 0x022e, /* CP_DEBUG */
	0x0231, 0x0232, /* CP_DEBUG_ECO_CONTROL - CP_DRAW_STATE_ADDR */
	0x0240, 0x0250, /* CP_PROTECT_REG_0 - CP_PROTECT_CTRL */
	0x04c0, 0x04ce, /* CP_ST_BASE - CP_STQ_AVAIL */
	0x04d0, 0x04d0, /* CP_MERCIU_STAT */
	0x04d2, 0x04dd, /* CP_WFI_PEND_CTR - CP_EVENTS_IN_FLIGHT */
	0x0500, 0x050b, /* CP_PERFCTR_CP_SEL_0 - CP_PERFCOMBINER_SELECT */
	0x0578, 0x058f, /* CP_SCRATCH_REG0 - CP_SCRATCH_REG23 */
	0x0c00, 0x0c03, /* VSC_BIN_SIZE - VSC_DEBUG_ECO_CONTROL */
	0x0c08, 0x0c41, /* VSC_PIPE_CONFIG_0 - VSC_PIPE_PARTIAL_POSN_1 */
	0x0c50, 0x0c51, /* VSC_PERFCTR_VSC_SEL_0 - VSC_PERFCTR_VSC_SEL_1 */
	0x0e64, 0x0e68, /* VPC_DEBUG_ECO_CONTROL - VPC_PERFCTR_VPC_SEL_3 */
	0x2140, 0x216e, /* VPC_ATTR - VPC_SO_FLUSH_WADDR_3 - ctx0 */
	0x2540, 0x256e, /* VPC_ATTR - VPC_SO_FLUSH_WADDR_3 - ctx1 */
	0x0f00, 0x0f0b, /* TPL1_DEBUG_ECO_CONTROL - TPL1_PERFCTR_TP_SEL_7 */
	/* TPL1_TP_TEX_OFFSET - TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR - ctx0 */
	0x2380, 0x23a6,
	/* TPL1_TP_TEX_OFFSET - TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR - ctx1 */
	0x2780, 0x27a6,
	0x0ec0, 0x0ecf, /* SP_VS_STATUS - SP_PERFCTR_SP_SEL_11 */
	0x22c0, 0x22c1, /* SP_SP_CTRL - SP_INSTR_CACHE_CTRL - ctx0 */
	0x22c4, 0x2360, /* SP_VS_CTRL_0 - SP_GS_LENGTH - ctx0 */
	0x26c0, 0x26c1, /* SP_SP_CTRL - SP_INSTR_CACHE_CTRL - ctx1 */
	0x26c4, 0x2760, /* SP_VS_CTRL_0 - SP_GS_LENGTH - ctx1 */
	0x0cc0, 0x0cd2, /* RB_GMEM_BASE_ADDR - RB_PERFCTR_CCU_SEL_3 */
	0x20a0, 0x213f, /* RB_MODE_CONTROL - RB_VPORT_Z_CLAMP_MAX_15 - ctx0 */
	0x24a0, 0x253f, /* RB_MODE_CONTROL - RB_VPORT_Z_CLAMP_MAX_15 - ctx1 */
	0x0e40, 0x0e4a, /* VFD_DEBUG_CONTROL - VFD_PERFCTR_VFD_SEL_7 */
	0x2200, 0x2204, /* VFD_CONTROL_0 - VFD_CONTROL_4 - ctx 0 */
	0x2208, 0x22a9, /* VFD_INDEX_OFFSET - VFD_DECODE_INSTR_31 - ctx 0 */
	0x2600, 0x2604, /* VFD_CONTROL_0 - VFD_CONTROL_4 - ctx 1 */
	0x2608, 0x26a9, /* VFD_INDEX_OFFSET - VFD_DECODE_INSTR_31 - ctx 1 */
	0x0c80, 0x0c81, /* GRAS_TSE_STATUS - GRAS_DEBUG_ECO_CONTROL */
	0x0c88, 0x0c8b, /* GRAS_PERFCTR_TSE_SEL_0 - GRAS_PERFCTR_TSE_SEL_3 */
	0x2000, 0x2004, /* GRAS_CL_CLIP_CNTL - GRAS_CL_GB_CLIP_ADJ - ctx 0 */
	/* GRAS_CL_VPORT_XOFFSET_0 - GRAS_SC_EXTENT_WINDOW_TL - ctx 0 */
	0x2008, 0x209f,
	0x2400, 0x2404, /* GRAS_CL_CLIP_CNTL - GRAS_CL_GB_CLIP_ADJ - ctx 1 */
	/* GRAS_CL_VPORT_XOFFSET_0 - GRAS_SC_EXTENT_WINDOW_TL - ctx 1 */
	0x2408, 0x249f,
	0x0e80, 0x0e84, /* UCHE_CACHE_MODE_CONTROL - UCHE_TRAP_BASE_HI */
	0x0e88, 0x0e95, /* UCHE_CACHE_STATUS - UCHE_PERFCTR_UCHE_SEL_7 */
	0x0e00, 0x0e00, /* HLSQ_TIMEOUT_THRESHOLD - HLSQ_TIMEOUT_THRESHOLD */
	0x0e04, 0x0e0e, /* HLSQ_DEBUG_ECO_CONTROL - HLSQ_PERF_PIPE_MASK */
	0x23c0, 0x23db, /* HLSQ_CONTROL_0 - HLSQ_UPDATE_CONTROL - ctx 0 */
	0x27c0, 0x27db, /* HLSQ_CONTROL_0 - HLSQ_UPDATE_CONTROL - ctx 1 */
	0x0d00, 0x0d0c, /* PC_BINNING_COMMAND - PC_DRAWCALL_SETUP_OVERRIDE */
	0x0d10, 0x0d17, /* PC_PERFCTR_PC_SEL_0 - PC_PERFCTR_PC_SEL_7 */
	0x21c0, 0x21c6, /* PC_BIN_BASE - PC_RESTART_INDEX - ctx 0 */
	0x21e5, 0x21e7, /* PC_GS_PARAM - PC_HS_PARAM - ctx 0 */
	0x25c0, 0x25c6, /* PC_BIN_BASE - PC_RESTART_INDEX - ctx 1 */
	0x25e5, 0x25e7, /* PC_GS_PARAM - PC_HS_PARAM - ctx 1 */
};

/* Number of (start, stop) ranges in a4xx_registers */
const unsigned int a4xx_registers_count = ARRAY_SIZE(a4xx_registers) / 2;
/*
 * A420 VBIF (bus interface) register/value programming sequence,
 * applied at GPU start.  Terminated by a {0, 0} sentinel entry.
 */
static const struct adreno_vbif_data a420_vbif[] = {
	{ A4XX_VBIF_ABIT_SORT, 0x0001001F },
	{ A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
	{ A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001 },
	{ A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
	{ A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018 },
	{ A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
	{ A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018 },
	{ A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003 },
	{0, 0},
};
/* Map each supported A4XX variant to its VBIF programming table. */
const struct adreno_vbif_platform a4xx_vbif_platforms[] = {
	{ adreno_is_a420, a420_vbif },
};
/*
 * One-time hardware bring-up for A4XX GPUs: program the VBIF, enable
 * performance counting, idle detection and error reporting, and point
 * the render backend at OCMEM.  The write order follows the hardware
 * programming sequence and should not be rearranged.
 */
static void a4xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;

	adreno_vbif_start(device, a4xx_vbif_platforms,
			ARRAY_SIZE(a4xx_vbif_platforms));

	/* Make all blocks contribute to the GPU BUSY perf counter */
	kgsl_regwrite(device, A4XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Tune the hystersis counters for SP and CP idle detection */
	kgsl_regwrite(device, A4XX_RBBM_SP_HYST_CNT, 0x10);
	kgsl_regwrite(device, A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/*
	 * Enable the RBBM error reporting bits.  This lets us get
	 * useful information on failure
	 */
	kgsl_regwrite(device, A4XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting */
	kgsl_regwrite(device, A4XX_RBBM_AHB_CTL1, 0xA6FFFFFF);

	/*
	 * Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang
	 */
	kgsl_regwrite(device, A4XX_RBBM_INTERFACE_HANG_INT_CTL,
			(1 << 16) | 0xFFF);

	/* Set the OCMEM base address for A4XX */
	kgsl_regwrite(device, A4XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(adreno_dev->ocmem_base >> 14));
}
/*
 * Register offset defines for A4XX, in order of enum adreno_regs.
 * Entries not listed here fall back to offset_0 (ADRENO_REG_REGISTER_MAX,
 * i.e. "not present on this core") via the lookup table below.
 */
static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_DEBUG, A4XX_CP_DEBUG),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, A4XX_CP_ME_RAM_WADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_DATA, A4XX_CP_ME_RAM_DATA),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_DATA, A4XX_CP_PFP_UCODE_DATA),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_ADDR, A4XX_CP_PFP_UCODE_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A4XX_CP_WFI_PEND_CTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A4XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR, A4XX_CP_RB_RPTR_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A4XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A4XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_CTRL, A4XX_CP_PROTECT_CTRL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A4XX_CP_ME_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A4XX_CP_RB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A4XX_CP_IB1_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A4XX_CP_IB1_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A4XX_CP_IB2_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A4XX_CP_IB2_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_TIMESTAMP, A4XX_CP_SCRATCH_REG0),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_RADDR, A4XX_CP_ME_RAM_RADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_ADDR, A4XX_CP_SCRATCH_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_UMSK, A4XX_CP_SCRATCH_UMASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A4XX_RBBM_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A4XX_RBBM_PERFCTR_CTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
			A4XX_RBBM_PERFCTR_LOAD_CMD0),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
			A4XX_RBBM_PERFCTR_LOAD_CMD1),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
			A4XX_RBBM_PERFCTR_LOAD_CMD2),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
			A4XX_RBBM_PERFCTR_PWR_1_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A4XX_RBBM_INT_0_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A4XX_RBBM_INT_0_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_ERROR_STATUS,
			A4XX_RBBM_AHB_ERROR_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_CMD, A4XX_RBBM_AHB_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_SEL,
			A4XX_VPC_DEBUG_RAM_SEL),
	ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_READ,
			A4XX_VPC_DEBUG_RAM_READ),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
			A4XX_RBBM_INT_CLEAR_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0,
			A4XX_VSC_PIPE_DATA_ADDRESS_0),
	ADRENO_REG_DEFINE(ADRENO_REG_VSC_PIPE_DATA_LENGTH_7,
			A4XX_VSC_PIPE_DATA_LENGTH_7),
	ADRENO_REG_DEFINE(ADRENO_REG_VSC_SIZE_ADDRESS, A4XX_VSC_SIZE_ADDRESS),
	ADRENO_REG_DEFINE(ADRENO_REG_VFD_CONTROL_0, A4XX_VFD_CONTROL_0),
	ADRENO_REG_DEFINE(ADRENO_REG_VFD_FETCH_INSTR_0_0,
			A4XX_VFD_FETCH_INSTR_0_0),
	ADRENO_REG_DEFINE(ADRENO_REG_VFD_FETCH_INSTR_1_F,
			A4XX_VFD_FETCH_INSTR_1_31),
	ADRENO_REG_DEFINE(ADRENO_REG_SP_VS_PVT_MEM_ADDR_REG,
			A4XX_SP_VS_PVT_MEM_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_SP_FS_PVT_MEM_ADDR_REG,
			A4XX_SP_FS_PVT_MEM_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_SP_VS_OBJ_START_REG,
			A4XX_SP_VS_OBJ_START),
	ADRENO_REG_DEFINE(ADRENO_REG_SP_FS_OBJ_START_REG,
			A4XX_SP_FS_OBJ_START),
};
/* Offset table wrapper; offset_0 marks "register not defined". */
const struct adreno_reg_offsets a4xx_reg_offsets = {
	.offsets = a4xx_register_offsets,
	.offset_0 = ADRENO_REG_REGISTER_MAX,
};
/*
 * A4XX gpudev ops.  Only start() is A4XX-specific here; the ringbuffer
 * init and IRQ/busy callbacks are shared with the A3XX implementation.
 */
struct adreno_gpudev adreno_a4xx_gpudev = {
	.reg_offsets = &a4xx_reg_offsets,
	.rb_init = a3xx_rb_init,
	.irq_control = a3xx_irq_control,
	.irq_handler = a3xx_irq_handler,
	.irq_pending = a3xx_irq_pending,
	.busy_cycles = a3xx_busy_cycles,
	.start = a4xx_start,
};
| gpl-2.0 |
StarKissed/android_kernel_htc_mecha | tools/perf/util/header.c | 744 | 27398 | #define _FILE_OFFSET_BITS 64
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include "util.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
/*
 * Allocate and initialize a perf.data header attribute wrapping @attr.
 * The id table starts with room for a single u64.  Returns NULL if any
 * allocation fails (nothing is leaked in that case).
 */
struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
{
	struct perf_header_attr *self = malloc(sizeof(*self));

	if (self == NULL)
		return NULL;

	self->attr = *attr;
	self->ids  = 0;
	self->size = 1;
	self->id   = malloc(sizeof(u64));
	if (self->id == NULL) {
		free(self);
		return NULL;
	}

	return self;
}
/*
 * Free a header attribute and its id table.  The attr must already be
 * unreachable from any perf_header.
 */
void perf_header_attr__delete(struct perf_header_attr *self)
{
	free(self->id);
	free(self);
}
/*
 * Append @id to the attr's id table, doubling the table as needed.
 * Returns 0 on success, -1 if the table could not be grown.
 *
 * Fixed: the original bumped self->ids *before* attempting the realloc,
 * so a failed growth left the count claiming one more id than was ever
 * stored.  Now the count is only advanced after the slot is written.
 */
int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
{
	int pos = self->ids;

	if (self->ids + 1 > self->size) {
		int nsize = self->size * 2;
		u64 *nid = realloc(self->id, nsize * sizeof(u64));

		if (nid == NULL)
			return -1;

		self->size = nsize;
		self->id = nid;
	}

	self->id[pos] = id;
	self->ids++;
	return 0;
}
/*
 * Initialize a perf_header's attribute table with room for one entry.
 * Returns 0 on success or -ENOMEM.
 */
int perf_header__init(struct perf_header *self)
{
	self->size = 1;
	self->attr = malloc(sizeof(void *));
	if (self->attr == NULL)
		return -ENOMEM;
	return 0;
}
/*
 * Release every attribute owned by the header, then the table itself.
 */
void perf_header__exit(struct perf_header *self)
{
	int i = 0;

	while (i < self->attrs) {
		perf_header_attr__delete(self->attr[i]);
		++i;
	}
	free(self->attr);
}
/*
 * Append @attr to the header's attribute table, doubling the table when
 * full.  Returns 0 on success, -1 if the header is frozen or the table
 * could not be grown.
 */
int perf_header__add_attr(struct perf_header *self,
			  struct perf_header_attr *attr)
{
	if (self->frozen)
		return -1;

	if (self->attrs == self->size) {
		int nsize = self->size * 2;
		struct perf_header_attr **grown;

		grown = realloc(self->attr, nsize * sizeof(void *));
		if (grown == NULL)
			return -1;

		self->size = nsize;
		self->attr = grown;
	}

	self->attr[self->attrs] = attr;
	self->attrs++;
	return 0;
}
/* Growable table of user-named events, filled by perf_header__push_event(). */
static int event_count;
static struct perf_trace_event_type *events;
/*
 * Record a user-supplied (id, name) event pair in the global events table.
 * Returns 0 on success or -ENOMEM.
 *
 * Fixed: the truncation warning used '> MAX_EVENT_NAME', which missed
 * names of exactly MAX_EVENT_NAME characters even though the bounded
 * copy below (MAX_EVENT_NAME - 1 bytes plus the memset NUL) truncates
 * them too.  (Assumes the name field is char[MAX_EVENT_NAME] as
 * declared in header.h — confirm if that ever changes.)
 */
int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) >= MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}
/*
 * Linear search of the pushed-events table; returns the event's name
 * or NULL if @id was never registered.
 */
char *perf_header__find_event(u64 id)
{
	int i = 0;

	while (i < event_count) {
		if (events[i].event_id == id)
			return events[i].name;
		i++;
	}
	return NULL;
}
/* perf.data file magic: the 8 bytes "PERFFILE" read as a u64. */
static const char *__perf_magic = "PERFFILE";

/*
 * NOTE(review): dereferencing the literal through a u64 pointer relies
 * on alignment and technically violates strict aliasing; it works with
 * the toolchains perf targets, but a memcpy-based read would be cleaner.
 */
#define PERF_MAGIC (*(u64 *)__perf_magic)

/* On-disk layout: the event attr followed by where its ids live. */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
/* Mark feature @feat as present in this header's feature bitmap. */
void perf_header__set_feat(struct perf_header *self, int feat)
{
	set_bit(feat, self->adds_features);
}
/* Test whether feature @feat is present in this header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *self, int feat)
{
	return test_bit(feat, self->adds_features);
}
/*
 * Write all @size bytes of @buf to @fd, looping over short writes.
 * Returns 0 on success or -errno from the failing write().
 *
 * Fixes: arithmetic on a void pointer (a GCC extension, not standard C)
 * now goes through a char cursor, and write()'s ssize_t result is no
 * longer narrowed into an int.
 */
static int do_write(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		p += ret;
	}

	return 0;
}
#define NAME_ALIGN 64
/*
 * Write @count bytes of @bf to @fd, then pad with zero bytes up to
 * @count_aligned.  Returns 0 or the first do_write() error.
 */
static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err;

	err = do_write(fd, bf, count);
	if (err)
		return err;

	return do_write(fd, zero_buf, count_aligned - count);
}
/* Iterate dsos on @head, skipping those with no build-id recorded. */
#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)	\
		if (!pos->has_build_id)		\
			continue;		\
		else

/*
 * Emit one build_id_event record (header + build-id + padded pathname)
 * for every dso on @head that was actually hit during the session.
 * @misc distinguishes kernel vs user (and host vs guest) dsos.
 */
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		/* pathname field is padded to a NAME_ALIGN boundary */
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}
/*
 * Write build-id records for one machine's kernel and user dsos,
 * tagging records with guest misc values for non-host machines.
 */
static int machine__write_buildid_table(struct machine *self, int fd)
{
	u16 kmisc, umisc;
	int err;

	if (machine__is_host(self)) {
		kmisc = PERF_RECORD_MISC_KERNEL;
		umisc = PERF_RECORD_MISC_USER;
	} else {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&self->user_dsos,
						  self->pid, umisc, fd);
	return err;
}
static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
struct perf_session *session = container_of(header,
struct perf_session, header);
struct rb_node *nd;
int err = machine__write_buildid_table(&session->host_machine, fd);
if (err)
return err;
for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
break;
}
return err;
}
/*
 * Install @name's file into the build-id cache under @debugdir:
 *   <debugdir>/<name>/<sbuild_id>           (the cached copy/hardlink)
 *   <debugdir>/.build-id/XX/rest-of-id      (symlink back to it)
 * For kallsyms a copy of /proc/kallsyms is cached instead of a link.
 * Returns 0 on success, -1 on any failure.
 */
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *filename = malloc(size),
	     *linkname = malloc(size), *targetname;
	int len, err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	len = snprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", name);
	if (mkdir_p(filename, 0755))
		goto out_free;

	/*
	 * Fixed: the original passed sizeof(filename) here — the size of
	 * the *pointer* (typically 8 bytes), not the PATH_MAX buffer —
	 * silently truncating the "/<sbuild_id>" suffix.
	 */
	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(name, filename) && copyfile(name, filename))
			goto out_free;
	}

	len = snprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);

	/* link target is relative: rewrite <debugdir> prefix to "../.." */
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
/*
 * Binary-build-id convenience wrapper: render the raw id as hex and
 * hand off to build_id_cache__add_s().
 */
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}
/*
 * Remove @sbuild_id from the cache under @debugdir: delete the
 * .build-id symlink and the cached file it referred to.
 * Returns 0 on success, -1 on any failure.
 */
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = malloc(size),
	     *linkname = malloc(size);
	ssize_t len;
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	/*
	 * Fixed: readlink() does not NUL-terminate its output; the
	 * original passed the buffer straight to snprintf() and could
	 * read past the end of the link text.
	 */
	len = readlink(linkname, filename, size - 1);
	if (len < 0)
		goto out_free;
	filename[len] = '\0';

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
/*
 * Cache one dso's build-id.  A kernel dso whose long name is not an
 * absolute path is the kallsyms pseudo-dso.
 */
static int dso__cache_build_id(struct dso *self, const char *debugdir)
{
	bool is_kallsyms;

	is_kallsyms = self->kernel && self->long_name[0] != '/';

	return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
				     self->long_name, debugdir, is_kallsyms);
}
static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
struct dso *pos;
int err = 0;
dsos__for_each_with_build_id(pos, head)
if (dso__cache_build_id(pos, debugdir))
err = -1;
return err;
}
/*
 * Cache build-ids for one machine's kernel and user dsos; the two
 * results are OR-combined so either failure yields nonzero.
 */
static int machine__cache_build_ids(struct machine *self, const char *debugdir)
{
	int kret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir);
	int uret = __dsos__cache_build_ids(&self->user_dsos, debugdir);

	return kret | uret;
}
/*
 * Populate ~/.debug (DEBUG_CACHE_DIR) with build-id entries for the
 * host machine and every guest machine in the session.
 * Returns 0 on success, -1 on any failure.
 *
 * Fixed: getenv("HOME") can return NULL (e.g. daemons, stripped
 * environments); passing NULL to a %s conversion is undefined
 * behavior, so that case now fails cleanly instead.
 */
static int perf_session__cache_build_ids(struct perf_session *self)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];
	const char *home = getenv("HOME");

	if (home == NULL)
		return -1;

	snprintf(debugdir, sizeof(debugdir), "%s/%s", home, DEBUG_CACHE_DIR);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&self->host_machine, debugdir);

	for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}
/*
 * Read build-ids for all DSOs of @self; returns true if any DSO in
 * either list (kernel or user) yielded one.  Both lists are always
 * scanned.
 */
static bool machine__read_build_ids(struct machine *self, bool with_hits)
{
	bool kernel_found = __dsos__read_build_ids(&self->kernel_dsos, with_hits);
	bool user_found = __dsos__read_build_ids(&self->user_dsos, with_hits);

	return kernel_found || user_found;
}
/*
 * Read build-ids for the host machine and every guest machine in the
 * session.  Returns true if any machine produced at least one.
 */
static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits)
{
	struct rb_node *nd;
	bool found;

	found = machine__read_build_ids(&self->host_machine, with_hits);

	nd = rb_first(&self->machines);
	while (nd != NULL) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		if (machine__read_build_ids(pos, with_hits))
			found = true;
		nd = rb_next(nd);
	}

	return found;
}
/*
 * Write the optional feature sections (HEADER_TRACE_INFO,
 * HEADER_BUILD_ID) that live after the sample data, then seek back and
 * write the perf_file_section descriptor table at sec_start.  Called
 * from perf_header__write() when recording finishes.
 * Returns 0 on success, a negative do_write() error, or -ENOMEM.
 */
static int perf_header__adds_write(struct perf_header *self, int fd)
{
int nr_sections;
struct perf_session *session;
struct perf_file_section *feat_sec;
int sec_size;
u64 sec_start;
int idx = 0, err;
session = container_of(self, struct perf_session, header);
/* only advertise build-ids if at least one DSO with hits has one */
if (perf_session__read_build_ids(session, true))
perf_header__set_feat(self, HEADER_BUILD_ID);
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
feat_sec = calloc(sizeof(*feat_sec), nr_sections);
if (feat_sec == NULL)
return -ENOMEM;
sec_size = sizeof(*feat_sec) * nr_sections;
sec_start = self->data_offset + self->data_size;
/* skip room for the descriptor table; payloads are written first */
lseek(fd, sec_start + sec_size, SEEK_SET);
if (perf_header__has_feat(self, HEADER_TRACE_INFO)) {
struct perf_file_section *trace_sec;
trace_sec = &feat_sec[idx++];
/* Write trace info */
trace_sec->offset = lseek(fd, 0, SEEK_CUR);
read_tracing_data(fd, attrs, nr_counters);
trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
}
if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
struct perf_file_section *buildid_sec;
buildid_sec = &feat_sec[idx++];
/* Write build-ids */
buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
err = dsos__write_buildid_table(self, fd);
if (err < 0) {
pr_debug("failed to write buildid table\n");
goto out_free;
}
buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
buildid_sec->offset;
/* also populate the on-disk cache for later 'perf report' runs */
perf_session__cache_build_ids(session);
}
/* now go back and write the descriptor table before the payloads */
lseek(fd, sec_start, SEEK_SET);
err = do_write(fd, feat_sec, sec_size);
if (err < 0)
pr_debug("failed to write feature section\n");
out_free:
free(feat_sec);
return err;
}
int perf_header__write_pipe(int fd)
{
struct perf_pipe_file_header f_header;
int err;
f_header = (struct perf_pipe_file_header){
.magic = PERF_MAGIC,
.size = sizeof(f_header),
};
err = do_write(fd, &f_header, sizeof(f_header));
if (err < 0) {
pr_debug("failed to write perf pipe header\n");
return err;
}
return 0;
}
/*
 * Write the perf.data file header: the per-event id arrays, the
 * perf_file_attr table, the user-defined event types, and (when
 * @at_exit) the feature sections, then rewind and write the fixed
 * perf_file_header at offset 0.  Leaves the file position at the end
 * of the sample data and marks the header frozen.
 * Returns 0 on success or a negative do_write() error.
 */
int perf_header__write(struct perf_header *self, int fd, bool at_exit)
{
struct perf_file_header f_header;
struct perf_file_attr f_attr;
struct perf_header_attr *attr;
int i, err;
/* leave room for the fixed header, written last */
lseek(fd, sizeof(f_header), SEEK_SET);
/* pass 1: the raw sample-id arrays, remembering each one's offset */
for (i = 0; i < self->attrs; i++) {
attr = self->attr[i];
attr->id_offset = lseek(fd, 0, SEEK_CUR);
err = do_write(fd, attr->id, attr->ids * sizeof(u64));
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
}
}
self->attr_offset = lseek(fd, 0, SEEK_CUR);
/* pass 2: the attr table, each entry pointing at its id array */
for (i = 0; i < self->attrs; i++) {
attr = self->attr[i];
f_attr = (struct perf_file_attr){
.attr = attr->attr,
.ids = {
.offset = attr->id_offset,
.size = attr->ids * sizeof(u64),
}
};
err = do_write(fd, &f_attr, sizeof(f_attr));
if (err < 0) {
pr_debug("failed to write perf header attribute\n");
return err;
}
}
self->event_offset = lseek(fd, 0, SEEK_CUR);
self->event_size = event_count * sizeof(struct perf_trace_event_type);
if (events) {
err = do_write(fd, events, self->event_size);
if (err < 0) {
pr_debug("failed to write perf header events\n");
return err;
}
}
self->data_offset = lseek(fd, 0, SEEK_CUR);
/* at record exit, append the optional feature sections */
if (at_exit) {
err = perf_header__adds_write(self, fd);
if (err < 0)
return err;
}
f_header = (struct perf_file_header){
.magic = PERF_MAGIC,
.size = sizeof(f_header),
.attr_size = sizeof(f_attr),
.attrs = {
.offset = self->attr_offset,
.size = self->attrs * sizeof(f_attr),
},
.data = {
.offset = self->data_offset,
.size = self->data_size,
},
.event_types = {
.offset = self->event_offset,
.size = self->event_size,
},
};
memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
/* finally, the fixed header at the start of the file */
lseek(fd, 0, SEEK_SET);
err = do_write(fd, &f_header, sizeof(f_header));
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
}
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
self->frozen = 1;
return 0;
}
/*
 * Read @size bytes into @buf and, if the file came from a
 * foreign-endian host, byteswap it as an array of 64-bit words.
 * Returns 0 on success, -1 on a failed read.
 */
static int perf_header__getbuffer64(struct perf_header *self,
				    int fd, void *buf, size_t size)
{
	int ret = -1;

	if (do_read(fd, buf, size) > 0) {
		if (self->needs_swap)
			mem_bswap_64(buf, size);
		ret = 0;
	}

	return ret;
}
/*
 * Walk the feature-section descriptor table stored after the sample
 * data and invoke @process once per feature flagged in adds_features,
 * in feature-bit order (matching the order they were written).
 * Returns 0 when there are no sections or all were processed, -1 on
 * read/alloc failure, or the first negative value from @process.
 */
int perf_header__process_sections(struct perf_header *self, int fd,
int (*process)(struct perf_file_section *self,
struct perf_header *ph,
int feat, int fd))
{
struct perf_file_section *feat_sec;
int nr_sections;
int sec_size;
int idx = 0;
int err = -1, feat = 1;
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
feat_sec = calloc(sizeof(*feat_sec), nr_sections);
if (!feat_sec)
return -1;
sec_size = sizeof(*feat_sec) * nr_sections;
/* the descriptor table sits right after the sample data */
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
goto out_free;
err = 0;
/* descriptors were written in ascending feature-bit order */
while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
if (perf_header__has_feat(self, feat)) {
struct perf_file_section *sec = &feat_sec[idx++];
err = process(sec, self, feat, fd);
if (err < 0)
break;
}
++feat;
}
out_free:
free(feat_sec);
return err;
}
/*
 * Read and validate the fixed file header at offset 0.  Detects a
 * foreign-endian file via a byteswapped attr_size and sets
 * ph->needs_swap; also accepts the older header layout that ended at
 * adds_features.  Copies the section offsets/sizes and feature bitmap
 * into @ph.  Returns 0 on success, -1 on a short read or bad magic.
 */
int perf_file_header__read(struct perf_file_header *self,
struct perf_header *ph, int fd)
{
lseek(fd, 0, SEEK_SET);
if (do_read(fd, self, sizeof(*self)) <= 0 ||
memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
return -1;
/* attr_size mismatching means the writer had the other endianness */
if (self->attr_size != sizeof(struct perf_file_attr)) {
u64 attr_size = bswap_64(self->attr_size);
if (attr_size != sizeof(struct perf_file_attr))
return -1;
/* swap everything up to (not including) the feature bitmap */
mem_bswap_64(self, offsetof(struct perf_file_header,
adds_features));
ph->needs_swap = true;
}
if (self->size != sizeof(*self)) {
/* Support the previous format */
if (self->size == offsetof(typeof(*self), adds_features))
bitmap_zero(self->adds_features, HEADER_FEAT_BITS);
else
return -1;
}
memcpy(&ph->adds_features, &self->adds_features,
sizeof(ph->adds_features));
/*
 * FIXME: hack that assumes that if we need swap the perf.data file
 * may be coming from an arch with a different word-size, ergo different
 * DEFINE_BITMAP format, investigate more later, but for now its mostly
 * safe to assume that we have a build-id section. Trace files probably
 * have several other issues in this realm anyway...
 */
if (ph->needs_swap) {
memset(&ph->adds_features, 0, sizeof(ph->adds_features));
perf_header__set_feat(ph, HEADER_BUILD_ID);
}
ph->event_offset = self->event_types.offset;
ph->event_size = self->event_types.size;
ph->data_offset = self->data.offset;
ph->data_size = self->data.size;
return 0;
}
/*
 * Attach the build-id carried by @bev to the DSO named @filename,
 * creating the DSO on the appropriate machine list (kernel vs user,
 * host vs guest) if needed.  The CPU mode bits in the event header
 * select the list; unknown modes are rejected.
 * Returns 0 on success, -1 if the machine can't be found/created or
 * the CPU mode is unknown.
 */
static int __event_process_build_id(struct build_id_event *bev,
char *filename,
struct perf_session *session)
{
int err = -1;
struct list_head *head;
struct machine *machine;
u16 misc;
struct dso *dso;
enum dso_kernel_type dso_type;
machine = perf_session__findnew_machine(session, bev->pid);
if (!machine)
goto out;
misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
switch (misc) {
case PERF_RECORD_MISC_KERNEL:
dso_type = DSO_TYPE_KERNEL;
head = &machine->kernel_dsos;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
dso_type = DSO_TYPE_GUEST_KERNEL;
head = &machine->kernel_dsos;
break;
case PERF_RECORD_MISC_USER:
case PERF_RECORD_MISC_GUEST_USER:
dso_type = DSO_TYPE_USER;
head = &machine->user_dsos;
break;
default:
goto out;
}
dso = __dsos__findnew(head, filename);
if (dso != NULL) {
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
dso__set_build_id(dso, &bev->build_id);
/* bracketed names (e.g. "[kernel.kallsyms]") are kernel DSOs */
if (filename[0] == '[')
dso->kernel = dso_type;
build_id__sprintf(dso->build_id, sizeof(dso->build_id),
sbuild_id);
pr_debug("build id event received for %s: %s\n",
dso->long_name, sbuild_id);
}
err = 0;
out:
return err;
}
/*
 * Parse the HEADER_BUILD_ID feature section: a sequence of
 * build_id_event records, each followed by the DSO path, located
 * between @offset and @offset + @size in the file.
 * Returns 0 if the whole section parsed, -1 on a short read or a
 * corrupt record length.
 */
static int perf_header__read_build_ids(struct perf_header *self,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(self,
			struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		/*
		 * Guard against corrupt input: a record size smaller than
		 * the fixed part would make len non-positive, and anything
		 * >= sizeof(filename) would overflow the buffer below.
		 */
		if (len <= 0 || len >= (ssize_t)sizeof(filename))
			goto out;

		if (read(input, filename, len) != len)
			goto out;
		/* the on-disk path is NUL-padded; force termination anyway */
		filename[len] = '\0';

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
/*
 * Per-feature callback for perf_header__process_sections(): seek to
 * the section's payload and dispatch on the feature id.  Always
 * returns 0 -- failures only emit a debug message so the remaining
 * sections still get processed.
 * NOTE(review): "%Ld" is a non-standard length modifier for u64;
 * presumably pr_debug tolerates it here -- confirm against PRIu64
 * usage elsewhere in the tree.
 */
static int perf_file_section__process(struct perf_file_section *self,
struct perf_header *ph,
int feat, int fd)
{
if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
pr_debug("Failed to lseek to %Ld offset for feature %d, "
"continuing...\n", self->offset, feat);
return 0;
}
switch (feat) {
case HEADER_TRACE_INFO:
trace_report(fd, false);
break;
case HEADER_BUILD_ID:
if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
pr_debug("Failed to read buildids, continuing...\n");
break;
default:
pr_debug("unknown feature %d, continuing...\n", feat);
}
return 0;
}
/*
 * Read and validate the minimal pipe-mode header, optionally
 * forwarding it to stdout when repiping, and detect a foreign-endian
 * stream via a byteswapped size field.
 * Returns 0 on success, -1 on short read, bad magic, bad size, or a
 * failed repipe write.
 */
static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	if (do_read(fd, self, sizeof(*self)) <= 0)
		return -1;
	if (memcmp(&self->magic, __perf_magic, sizeof(self->magic)) != 0)
		return -1;

	/* in repipe mode, forward the header to stdout unchanged */
	if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
		return -1;

	/* a byteswapped size means the stream came from the other endianness */
	if (self->size != sizeof(*self)) {
		u64 swapped = bswap_64(self->size);

		if (swapped != sizeof(*self))
			return -1;
		ph->needs_swap = true;
	}

	return 0;
}
/*
 * Pipe-mode variant of perf_header__read(): validate the stream
 * header and remember the fd on the session.
 * Returns 0 on success, -EINVAL on an incompatible stream.
 */
static int perf_header__read_pipe(struct perf_session *session, int fd)
{
	struct perf_header *self = &session->header;
	struct perf_pipe_file_header f_header;
	int err;

	err = perf_file_header__read_pipe(&f_header, self, fd,
					  session->repipe);
	if (err < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	session->fd = fd;
	return 0;
}
/*
 * Read the whole perf.data header: the fixed header, the attr table
 * with each attr's sample ids, the user-defined event types, and the
 * feature sections.  Leaves the fd positioned at the sample data and
 * marks the header frozen.  Dispatches to the pipe variant for
 * fd_pipe sessions.
 * Returns 0 on success, -EINVAL / -ENOMEM / -errno on failure.
 * NOTE(review): 'events' allocated below is not freed on the
 * out_errno path -- presumably freed (or leaked once) at exit;
 * confirm ownership of the file-scope events buffer.
 */
int perf_header__read(struct perf_session *session, int fd)
{
struct perf_header *self = &session->header;
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
if (session->fd_pipe)
return perf_header__read_pipe(session, fd);
if (perf_file_header__read(&f_header, self, fd) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
}
nr_attrs = f_header.attrs.size / sizeof(f_attr);
lseek(fd, f_header.attrs.offset, SEEK_SET);
for (i = 0; i < nr_attrs; i++) {
struct perf_header_attr *attr;
off_t tmp;
if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
goto out_errno;
/* remember where the next attr entry starts before chasing ids */
tmp = lseek(fd, 0, SEEK_CUR);
attr = perf_header_attr__new(&f_attr.attr);
if (attr == NULL)
return -ENOMEM;
nr_ids = f_attr.ids.size / sizeof(u64);
lseek(fd, f_attr.ids.offset, SEEK_SET);
for (j = 0; j < nr_ids; j++) {
if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
goto out_errno;
if (perf_header_attr__add_id(attr, f_id) < 0) {
perf_header_attr__delete(attr);
return -ENOMEM;
}
}
if (perf_header__add_attr(self, attr) < 0) {
perf_header_attr__delete(attr);
return -ENOMEM;
}
/* back to the attr table for the next entry */
lseek(fd, tmp, SEEK_SET);
}
if (f_header.event_types.size) {
lseek(fd, f_header.event_types.offset, SEEK_SET);
events = malloc(f_header.event_types.size);
if (events == NULL)
return -ENOMEM;
if (perf_header__getbuffer64(self, fd, events,
f_header.event_types.size))
goto out_errno;
event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
}
perf_header__process_sections(self, fd, perf_file_section__process);
lseek(fd, self->data_offset, SEEK_SET);
self->frozen = 1;
return 0;
out_errno:
return -errno;
}
/*
 * Return the sample_type shared by every attribute in @header, or 0
 * if there are none.  Dies if the attributes disagree, since mixed
 * sample layouts can't be parsed.
 */
u64 perf_header__sample_type(struct perf_header *header)
{
	int i;
	u64 type = 0;

	for (i = 0; i < header->attrs; i++) {
		struct perf_header_attr *attr = header->attr[i];

		if (type == 0)
			type = attr->attr.sample_type;
		else if (attr->attr.sample_type != type)
			die("non matching sample_type");
	}

	return type;
}
/*
 * Find the perf_event_attr that owns sample id @id, or NULL if no
 * attribute claims it.
 */
struct perf_event_attr *
perf_header__find_attr(u64 id, struct perf_header *header)
{
	int i, j;

	/*
	 * We set id to -1 if the data file doesn't contain sample
	 * ids. Check for this and avoid walking through the entire
	 * list of ids which may be large.
	 */
	if (id == -1ULL)
		return NULL;

	for (i = 0; i < header->attrs; i++) {
		struct perf_header_attr *attr = header->attr[i];

		for (j = 0; j < attr->ids; j++)
			if (attr->id[j] == id)
				return &attr->attr;
	}

	return NULL;
}
/*
 * Build a PERF_RECORD_HEADER_ATTR synthetic event carrying @attr plus
 * its @ids sample ids and feed it to @process.
 * Returns the handler's result, or -ENOMEM if the event buffer cannot
 * be allocated.
 */
int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
			   event__handler_t process,
			   struct perf_session *session)
{
	event_t *ev;
	size_t size;
	int err;

	/* header + u64-aligned attr + the trailing id array */
	size = sizeof(struct perf_event_attr);
	size = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);
	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(ev, session);

	free(ev);
	return err;
}
/*
 * Synthesize one PERF_RECORD_HEADER_ATTR event per attribute in the
 * header.  Stops at the first failure and returns that error; 0 when
 * every attribute was emitted.
 */
int event__synthesize_attrs(struct perf_header *self,
			    event__handler_t process,
			    struct perf_session *session)
{
	int i, err = 0;

	for (i = 0; i < self->attrs; i++) {
		struct perf_header_attr *attr = self->attr[i];

		err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
					     process, session);
		if (err != 0) {
			pr_debug("failed to create perf header attribute\n");
			break;
		}
	}

	return err;
}
/*
 * Handle an incoming PERF_RECORD_HEADER_ATTR event: rebuild the
 * perf_header_attr, attach every sample id carried in the trailing
 * array, add it to the session header and refresh the session's
 * sample type.  Returns 0 on success, -ENOMEM on allocation failure.
 */
int event__process_attr(event_t *self, struct perf_session *session)
{
struct perf_header_attr *attr;
unsigned int i, ids, n_ids;
attr = perf_header_attr__new(&self->attr.attr);
if (attr == NULL)
return -ENOMEM;
/* bytes of the trailing id array = event size minus fixed part */
ids = self->header.size;
ids -= (void *)&self->attr.id - (void *)self;
n_ids = ids / sizeof(u64);
for (i = 0; i < n_ids; i++) {
if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
perf_header_attr__delete(attr);
return -ENOMEM;
}
}
if (perf_header__add_attr(&session->header, attr) < 0) {
perf_header_attr__delete(attr);
return -ENOMEM;
}
perf_session__update_sample_type(session);
return 0;
}
/*
 * Build a PERF_RECORD_HEADER_EVENT_TYPE synthetic event for one
 * (event_id, name) pair and feed it to @process.  The header size is
 * trimmed so only the used, u64-aligned portion of the fixed-size
 * name[] field travels over the wire.
 * Returns the handler's result.
 */
int event__synthesize_event_type(u64 event_id, char *name,
event__handler_t process,
struct perf_session *session)
{
event_t ev;
size_t size = 0;
int err = 0;
memset(&ev, 0, sizeof(ev));
ev.event_type.event_type.event_id = event_id;
memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
/* truncate to MAX_EVENT_NAME - 1; buffer was pre-zeroed above */
strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
size = strlen(name);
size = ALIGN(size, sizeof(u64));
ev.event_type.header.size = sizeof(ev.event_type) -
(sizeof(ev.event_type.event_type.name) - size);
err = process(&ev, session);
return err;
}
/*
 * Replay every user-defined event type recorded in the file-scope
 * events[] table as a synthetic event.  Stops at the first failure
 * and returns that error; 0 when all were emitted.
 */
int event__synthesize_event_types(event__handler_t process,
				  struct perf_session *session)
{
	int i, err = 0;

	for (i = 0; i < event_count; i++) {
		struct perf_trace_event_type *type = &events[i];

		err = event__synthesize_event_type(type->event_id, type->name,
						   process, session);
		if (err != 0) {
			pr_debug("failed to create perf header event type\n");
			break;
		}
	}

	return err;
}
/*
 * Handle an incoming PERF_RECORD_HEADER_EVENT_TYPE event by
 * registering its (event_id, name) pair with the header.
 * Returns 0 on success, -ENOMEM if registration fails.
 */
int event__process_event_type(event_t *self,
struct perf_session *session __unused)
{
if (perf_header__push_event(self->event_type.event_type.event_id,
self->event_type.event_type.name) < 0)
return -ENOMEM;
return 0;
}
/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA event announcing the
 * (u64-aligned) size of the tracing data, then write the data itself
 * plus alignment padding directly to @fd.
 * Returns the aligned size written, or read_tracing_data_size()'s
 * result (<= 0) when there is no data or sizing failed.
 */
int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
int nb_events,
event__handler_t process,
struct perf_session *session __unused)
{
event_t ev;
ssize_t size = 0, aligned_size = 0, padding;
int err = 0;
memset(&ev, 0, sizeof(ev));
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
size = read_tracing_data_size(fd, pattrs, nb_events);
if (size <= 0)
return size;
aligned_size = ALIGN(size, sizeof(u64));
padding = aligned_size - size;
ev.tracing_data.header.size = sizeof(ev.tracing_data);
ev.tracing_data.size = aligned_size;
/* the event only announces the size; the payload follows on fd */
process(&ev, session);
err = read_tracing_data(fd, pattrs, nb_events);
write_padded(fd, NULL, 0, padding);
return aligned_size;
}
/*
 * Handle an incoming tracing-data event: parse the tracing data that
 * follows the event in the stream, consume the alignment padding and,
 * when repiping, forward the padding to stdout.  Dies on a size
 * mismatch between the announced and parsed sizes.
 * Returns the number of bytes consumed (parsed data + padding).
 * NOTE(review): the padding read() is only checked for < 0, not for a
 * short read -- presumably padding < BUFSIZ always arrives whole from
 * the mmap'ed file; confirm.
 */
int event__process_tracing_data(event_t *self,
struct perf_session *session)
{
ssize_t size_read, padding, size = self->tracing_data.size;
off_t offset = lseek(session->fd, 0, SEEK_CUR);
char buf[BUFSIZ];
/* setup for reading amidst mmap */
lseek(session->fd, offset + sizeof(struct tracing_data_event),
SEEK_SET);
size_read = trace_report(session->fd, session->repipe);
padding = ALIGN(size_read, sizeof(u64)) - size_read;
if (read(session->fd, buf, padding) < 0)
die("reading input file");
if (session->repipe) {
int retw = write(STDOUT_FILENO, buf, padding);
if (retw <= 0 || retw != padding)
die("repiping tracing data padding");
}
if (size_read + padding != size)
die("tracing data size mismatch");
return size_read + padding;
}
/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID event for @pos, but only if the
 * DSO was actually hit during the session (pos->hit); unhit DSOs are
 * silently skipped with err == 0.  The DSO path is appended to the
 * event, padded to NAME_ALIGN.
 * Returns the handler's result, or 0 when skipped.
 */
int event__synthesize_build_id(struct dso *pos, u16 misc,
event__handler_t process,
struct machine *machine,
struct perf_session *session)
{
event_t ev;
size_t len;
int err = 0;
if (!pos->hit)
return err;
memset(&ev, 0, sizeof(ev));
/* +1 for the NUL, then round up so records stay aligned */
len = pos->long_name_len + 1;
len = ALIGN(len, NAME_ALIGN);
memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
ev.build_id.header.misc = misc;
ev.build_id.pid = machine->pid;
ev.build_id.header.size = sizeof(ev.build_id) + len;
memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
err = process(&ev, session);
return err;
}
/*
 * Handle an incoming build-id event by attaching its build-id to the
 * named DSO.  Per-DSO failures are ignored; always returns 0.
 */
int event__process_build_id(event_t *self,
			    struct perf_session *session)
{
	struct build_id_event *bev = &self->build_id;

	__event_process_build_id(bev, bev->filename, session);
	return 0;
}
| gpl-2.0 |
giamteckchoon/linux-pvops-2.6.32 | drivers/video/geode/suspend_gx.c | 744 | 6239 | /*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2008 Andres Salomon <dilinger@debian.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/fb.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/geode.h>
#include <asm/delay.h>
#include "gxfb.h"
#ifdef CONFIG_PM
/*
 * Snapshot the GX graphics state into @par for later restore by
 * gx_restore_regs(): the PADSEL/DOTPLL MSRs, the GP/DC/VP/FP register
 * banks, and the DC palette.  Waits for the blitter to go idle first
 * so no engine state is captured mid-operation.
 */
static void gx_save_regs(struct gxfb_par *par)
{
int i;
/* wait for the BLT engine to stop being busy */
do {
i = read_gp(par, GP_BLT_STATUS);
} while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY));
/* save MSRs */
rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
/* unlock the DC register bank before reading it back */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
/* save registers */
memcpy(par->gp, par->gp_regs, sizeof(par->gp));
memcpy(par->dc, par->dc_regs, sizeof(par->dc));
memcpy(par->vp, par->vid_regs, sizeof(par->vp));
memcpy(par->fp, par->vid_regs + VP_FP_START, sizeof(par->fp));
/* save the palette */
write_dc(par, DC_PAL_ADDRESS, 0);
for (i = 0; i < ARRAY_SIZE(par->pal); i++)
par->pal[i] = read_dc(par, DC_PAL_DATA);
}
/*
 * Reprogram the dot PLL with the saved high MSR word: assert DOTRESET
 * (with BYPASS cleared), write the divisors, poll up to ~200us for
 * the LOCK bit, then release reset.
 */
static void gx_set_dotpll(uint32_t dotpll_hi)
{
uint32_t dotpll_lo;
int i;
rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS;
wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
/* wait for the PLL to lock */
for (i = 0; i < 200; i++) {
rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
break;
udelay(1);
}
/* PLL set, unlock */
dotpll_lo &= ~MSR_GLCP_DOTPLL_DOTRESET;
wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
}
/*
 * Restore the saved graphics-processor (GP) registers, skipping the
 * command/status registers that must not be replayed.
 */
static void gx_restore_gfx_proc(struct gxfb_par *par)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(par->gp); i++) {
		/* don't restore these registers */
		if (i == GP_VECTOR_MODE || i == GP_BLT_MODE ||
		    i == GP_BLT_STATUS || i == GP_HST_SRC)
			continue;

		write_gp(par, i, par->gp[i]);
	}
}
/*
 * Restore the saved display-controller (DC) registers and the
 * palette.  DC_UNLOCK is slot 0, so the bank is unlocked before any
 * other register is written; the enable bits in GENERAL_CFG and
 * DISPLAY_CFG are masked off here and only turned back on later by
 * gx_enable_graphics().
 */
static void gx_restore_display_ctlr(struct gxfb_par *par)
{
int i;
for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
switch (i) {
case DC_UNLOCK:
/* unlock the DC; runs first */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
break;
case DC_GENERAL_CFG:
/* write without the enables */
write_dc(par, i, par->dc[i] & ~(DC_GENERAL_CFG_VIDE |
DC_GENERAL_CFG_ICNE |
DC_GENERAL_CFG_CURE |
DC_GENERAL_CFG_DFLE));
break;
case DC_DISPLAY_CFG:
/* write without the enables */
write_dc(par, i, par->dc[i] & ~(DC_DISPLAY_CFG_VDEN |
DC_DISPLAY_CFG_GDEN |
DC_DISPLAY_CFG_TGEN));
break;
case DC_RSVD_0:
case DC_RSVD_1:
case DC_RSVD_2:
case DC_RSVD_3:
case DC_RSVD_4:
case DC_LINE_CNT:
case DC_PAL_ADDRESS:
case DC_PAL_DATA:
case DC_DFIFO_DIAG:
case DC_CFIFO_DIAG:
case DC_RSVD_5:
/* don't restore these registers */
break;
default:
write_dc(par, i, par->dc[i]);
}
}
/* restore the palette */
write_dc(par, DC_PAL_ADDRESS, 0);
for (i = 0; i < ARRAY_SIZE(par->pal); i++)
write_dc(par, DC_PAL_DATA, par->pal[i]);
}
/*
 * Restore the PADSEL MSR and the saved video-processor (VP)
 * registers.  The video and CRT enable bits in VCFG/DCFG are masked
 * off; gx_enable_graphics() switches them back on once everything
 * else is in place.
 */
static void gx_restore_video_proc(struct gxfb_par *par)
{
int i;
wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
switch (i) {
case VP_VCFG:
/* don't enable video yet */
write_vp(par, i, par->vp[i] & ~VP_VCFG_VID_EN);
break;
case VP_DCFG:
/* don't enable CRT yet */
write_vp(par, i, par->vp[i] &
~(VP_DCFG_DAC_BL_EN | VP_DCFG_VSYNC_EN |
VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
break;
case VP_GAR:
case VP_GDR:
case VP_RSVD_0:
case VP_RSVD_1:
case VP_RSVD_2:
case VP_RSVD_3:
case VP_CRC32:
case VP_AWT:
case VP_VTM:
/* don't restore these registers */
break;
default:
write_vp(par, i, par->vp[i]);
}
}
}
/*
 * Restore the full saved graphics state in dependency order: dot PLL
 * first, then the GP, DC and VP banks, and finally the flat-panel
 * registers (FP_PM stays off until gx_enable_graphics()).
 */
static void gx_restore_regs(struct gxfb_par *par)
{
	int i;

	gx_set_dotpll((uint32_t) (par->msr.dotpll >> 32));
	gx_restore_gfx_proc(par);
	gx_restore_display_ctlr(par);
	gx_restore_video_proc(par);

	/* Flat Panel: the power-management and reserved slots are skipped */
	for (i = 0; i < ARRAY_SIZE(par->fp); i++) {
		if (i == FP_PM || i == FP_RSVD_0)
			continue;
		write_fp(par, i, par->fp[i]);
	}
}
/*
 * Power down the display path for suspend: video/CRT outputs off,
 * flat panel off, then the DC enable bits, re-locking the DC bank
 * when done.  The saved copies in @par keep the pre-suspend values.
 */
static void gx_disable_graphics(struct gxfb_par *par)
{
/* shut down the engine */
write_vp(par, VP_VCFG, par->vp[VP_VCFG] & ~VP_VCFG_VID_EN);
write_vp(par, VP_DCFG, par->vp[VP_DCFG] & ~(VP_DCFG_DAC_BL_EN |
VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
/* turn off the flat panel */
write_fp(par, FP_PM, par->fp[FP_PM] & ~FP_PM_P);
/* turn off display */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG] &
~(DC_GENERAL_CFG_VIDE | DC_GENERAL_CFG_ICNE |
DC_GENERAL_CFG_CURE | DC_GENERAL_CFG_DFLE));
write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG] &
~(DC_DISPLAY_CFG_VDEN | DC_DISPLAY_CFG_GDEN |
DC_DISPLAY_CFG_TGEN));
write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
/*
 * Re-enable the display path after gx_restore_regs(): sequence the
 * flat-panel power (avoiding a redundant write while a power
 * transition is already in flight), then restore the full VCFG/DCFG
 * and DC configuration words, finishing with GENERAL_CFG which kicks
 * off the FIFO load, and re-lock the DC bank.
 */
static void gx_enable_graphics(struct gxfb_par *par)
{
uint32_t fp;
fp = read_fp(par, FP_PM);
if (par->fp[FP_PM] & FP_PM_P) {
/* power on the panel if not already power{ed,ing} on */
if (!(fp & (FP_PM_PANEL_ON|FP_PM_PANEL_PWR_UP)))
write_fp(par, FP_PM, par->fp[FP_PM]);
} else {
/* power down the panel if not already power{ed,ing} down */
if (!(fp & (FP_PM_PANEL_OFF|FP_PM_PANEL_PWR_DOWN)))
write_fp(par, FP_PM, par->fp[FP_PM]);
}
/* turn everything on */
write_vp(par, VP_VCFG, par->vp[VP_VCFG]);
write_vp(par, VP_DCFG, par->vp[VP_DCFG]);
write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG]);
/* do this last; it will enable the FIFO load */
write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG]);
/* lock the door behind us */
write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
/*
 * Suspend entry point: save all graphics state and power the display
 * path down.  Idempotent -- a second call while already powered down
 * is a no-op.  Always returns 0.
 */
int gx_powerdown(struct fb_info *info)
{
	struct gxfb_par *par = info->par;

	if (!par->powered_down) {
		gx_save_regs(par);
		gx_disable_graphics(par);
		par->powered_down = 1;
	}

	return 0;
}
/*
 * Resume entry point: restore the saved graphics state and re-enable
 * the display path.  Idempotent -- does nothing unless a prior
 * gx_powerdown() succeeded.  Always returns 0.
 */
int gx_powerup(struct fb_info *info)
{
	struct gxfb_par *par = info->par;

	if (par->powered_down) {
		gx_restore_regs(par);
		gx_enable_graphics(par);
		par->powered_down = 0;
	}

	return 0;
}
#endif
| gpl-2.0 |
linux-shield/kernel | drivers/gpu/drm/radeon/radeon_cursor.c | 1000 | 9756 | /*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
/*
 * Lock or unlock cursor register updates on @crtc so that position,
 * hotspot and address writes latch atomically.  The register and lock
 * bit differ per ASIC generation (DCE4+, AVIVO, legacy).
 */
static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
{
struct radeon_device *rdev = crtc->dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
uint32_t cur_lock;
if (ASIC_IS_DCE4(rdev)) {
cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
else
cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
} else if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
else
cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
} else {
cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= RADEON_CUR_LOCK;
else
cur_lock &= ~RADEON_CUR_LOCK;
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
}
}
/*
 * Hide the hardware cursor on @crtc.  On DCE4/AVIVO this rewrites the
 * cursor control register without the enable bit; on legacy chips it
 * clears RADEON_CRTC_CUR_EN in the per-CRTC GEN_CNTL register (only
 * CRTCs 0 and 1 exist there).
 */
static void radeon_hide_cursor(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
u32 reg;
switch (radeon_crtc->crtc_id) {
case 0:
reg = RADEON_CRTC_GEN_CNTL;
break;
case 1:
reg = RADEON_CRTC2_GEN_CNTL;
break;
default:
return;
}
WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
}
}
/*
 * Show the hardware cursor on @crtc, programming the control register
 * via the MM_INDEX/MM_DATA indirect pair.  Sets the enable bit along
 * with the pre-multiplied 24bpp+8 cursor mode (DCE4) or 24bpp mode
 * (AVIVO/legacy); legacy chips only support CRTCs 0 and 1.
 */
static void radeon_show_cursor(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
break;
case 1:
WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
break;
default:
return;
}
/* read-modify-write: only touch the cursor enable/mode bits */
WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
(RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
}
}
/*
 * Program the cursor image address for @crtc.  DCE4+ and RV770+ take
 * a full 64-bit VRAM address (high dword written separately); older
 * AVIVO parts take 32 bits; legacy chips take an offset relative to
 * the CRTC's display base address, which is also cached in
 * legacy_cursor_offset for radeon_crtc_cursor_move().
 */
static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
uint64_t gpu_addr)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(gpu_addr));
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
gpu_addr & 0xffffffff);
} else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id)
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
else
WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
}
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
gpu_addr & 0xffffffff);
} else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
}
}
/*
 * DRM cursor-set entry point: look up the cursor BO by GEM @handle,
 * pin it in VRAM, program and show it; a zero handle hides the
 * cursor.  The previously pinned cursor BO (if any) is unpinned and
 * unreferenced on the way out, and the new object reference is kept
 * in radeon_crtc->cursor_bo.
 * Returns 0 on success, -EINVAL for an oversized cursor, -ENOENT for
 * a bad handle, or a pin/reserve error.
 */
int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct radeon_bo *robj;
uint64_t gpu_addr;
int ret;
if (!handle) {
/* turn off cursor */
radeon_hide_cursor(crtc);
obj = NULL;
goto unpin;
}
if ((width > radeon_crtc->max_cursor_width) ||
(height > radeon_crtc->max_cursor_height)) {
DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
return -EINVAL;
}
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
return -ENOENT;
}
robj = gem_to_radeon_bo(obj);
ret = radeon_bo_reserve(robj, false);
if (unlikely(ret != 0))
goto fail;
/* Only 27 bit offset for legacy cursor */
ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
&gpu_addr);
radeon_bo_unreserve(robj);
if (ret)
goto fail;
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
/* program the new image under the update lock, then reveal it */
radeon_lock_cursor(crtc, true);
radeon_set_cursor(crtc, obj, gpu_addr);
radeon_show_cursor(crtc);
radeon_lock_cursor(crtc, false);
unpin:
/* release the previous cursor BO, if there was one */
if (radeon_crtc->cursor_bo) {
robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
ret = radeon_bo_reserve(robj, false);
if (likely(ret == 0)) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
return 0;
fail:
drm_gem_object_unreference_unlocked(obj);
return ret;
}
/*
 * DRM cursor-move entry point.  Negative coordinates are expressed
 * through the hardware hotspot (xorigin/yorigin) so the cursor can
 * slide off the top/left edge.  On pre-DCE6 AVIVO parts with more
 * than one active CRTC, the cursor width is trimmed so the image
 * never ends on a 128-pixel boundary or past the frame (hardware
 * erratum).  All position registers are written under the cursor
 * update lock.  Always returns 0.
 */
int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
int w = radeon_crtc->cursor_width;
if (ASIC_IS_AVIVO(rdev)) {
/* avivo cursor are offset into the total surface */
x += crtc->x;
y += crtc->y;
}
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
/* off-screen top/left positions become a hotspot offset instead */
if (x < 0) {
xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
x = 0;
}
if (y < 0) {
yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
y = 0;
}
/* fixed on DCE6 and newer */
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
int i = 0;
struct drm_crtc *crtc_p;
/*
 * avivo cursor image can't end on 128 pixel boundary or
 * go past the end of the frame if both crtcs are enabled
 *
 * NOTE: It is safe to access crtc->enabled of other crtcs
 * without holding either the mode_config lock or the other
 * crtc's lock as long as write access to this flag _always_
 * grabs all locks.
 */
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
if (crtc_p->enabled)
i++;
}
if (i > 1) {
int cursor_end, frame_end;
cursor_end = x - xorigin + w;
frame_end = crtc->x + crtc->mode.crtc_hdisplay;
if (cursor_end >= frame_end) {
w = w - (cursor_end - frame_end);
if (!(frame_end & 0x7f))
w--;
} else {
if (!(cursor_end & 0x7f))
w--;
}
/* never shrink below one pixel; nudge x off the boundary instead */
if (w <= 0) {
w = 1;
cursor_end = x - xorigin + w;
if (!(cursor_end & 0x7f)) {
x--;
WARN_ON_ONCE(x < 0);
}
}
}
}
radeon_lock_cursor(crtc, true);
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else {
/* doublescan modes address every other line */
if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
y *= 2;
WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK
| (xorigin << 16)
| yorigin));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK
| (x << 16)
| y));
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
}
radeon_lock_cursor(crtc, false);
return 0;
}
| gpl-2.0 |
IdeosDev/semc_urushi_kernel_3.0 | block/bsg.c | 1512 | 24300 | /*
* bsg.c - block layer implementation of the sg v4 interface
*
* Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
* Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License version 2. See the file "COPYING" in the main directory of this
* archive for more details.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"
/*
 * Per-open bsg device state.  Commands move from busy_list to done_list
 * as they complete; both lists and the counters are protected by 'lock'.
 */
struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
struct list_head busy_list; /* submitted, not yet completed */
struct list_head done_list; /* completed, awaiting read/reap */
struct hlist_node dev_list; /* link in the bsg_device_list hash */
atomic_t ref_count;
int queued_cmds; /* outstanding commands, bounded by max_queue */
int done_cmds; /* number of entries on done_list */
wait_queue_head_t wq_done; /* woken when a command completes */
wait_queue_head_t wq_free; /* woken when a queue slot frees up */
char name[20];
int max_queue; /* queue depth, settable via SG_SET_COMMAND_Q */
unsigned long flags; /* BSG_F_* bits */
};
enum {
BSG_F_BLOCK = 1,
};
#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768
#undef BSG_DEBUG
#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);
#define BSG_LIST_ARRAY_SIZE 8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static int bsg_major;
static struct kmem_cache *bsg_cmd_cachep;
/*
* our internal command type
*/
struct bsg_command {
struct bsg_device *bd;
struct list_head list; /* on bd->busy_list or bd->done_list */
struct request *rq;
struct bio *bio; /* saved for unmapping after completion */
struct bio *bidi_bio; /* bidirectional (din) bio, if any */
int err;
struct sg_io_v4 hdr; /* header copied back to userspace on read */
char sense[SCSI_SENSE_BUFFERSIZE]; /* sense buffer handed to the request */
};
/*
 * Return a command to the slab cache, release its queue slot and wake
 * any writer blocked waiting for room.
 */
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *dev = bc->bd;
	unsigned long irqflags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&dev->lock, irqflags);
	dev->queued_cmds--;
	spin_unlock_irqrestore(&dev->lock, irqflags);

	wake_up(&dev->wq_free);
}
/*
 * Allocate a command slot if the device is below its max_queue depth.
 * Returns ERR_PTR(-EINVAL) when the queue is full and ERR_PTR(-ENOMEM)
 * on slab failure.  queued_cmds is bumped before the lock is dropped so
 * the slot stays reserved across the (possibly sleeping) allocation.
 */
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
struct bsg_command *bc = ERR_PTR(-EINVAL);
spin_lock_irq(&bd->lock);
if (bd->queued_cmds >= bd->max_queue)
goto out;
bd->queued_cmds++;
spin_unlock_irq(&bd->lock);
bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
if (unlikely(!bc)) {
/* allocation failed: give the reserved slot back under the lock */
spin_lock_irq(&bd->lock);
bd->queued_cmds--;
bc = ERR_PTR(-ENOMEM);
goto out;
}
bc->bd = bd;
INIT_LIST_HEAD(&bc->list);
dprintk("%s: returning free cmd %p\n", bd->name, bc);
return bc;
out:
spin_unlock_irq(&bd->lock);
return bc;
}
/* Map a minor number onto one of the BSG_LIST_ARRAY_SIZE hash buckets. */
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	unsigned int bucket = index & (BSG_LIST_ARRAY_SIZE - 1);

	return bsg_device_list + bucket;
}
/*
 * Sleep until a queued command completes.  Returns 0 after a wakeup,
 * -ENODATA when no commands are outstanding, or -EAGAIN for O_NONBLOCK
 * opens.  The sleep is uninterruptible; bsg_complete_all_commands()
 * loops on this until it sees -ENODATA.
 */
static int bsg_io_schedule(struct bsg_device *bd)
{
DEFINE_WAIT(wait);
int ret = 0;
spin_lock_irq(&bd->lock);
BUG_ON(bd->done_cmds > bd->queued_cmds);
/*
 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
 * work to do", even though we return -ENOSPC after this same test
 * during bsg_write() -- there, it means our buffer can't have more
 * bsg_commands added to it, thus has no space left.
 */
if (bd->done_cmds == bd->queued_cmds) {
ret = -ENODATA;
goto unlock;
}
if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
ret = -EAGAIN;
goto unlock;
}
/* registered on the waitqueue before dropping the lock, so a
 * completion racing with the unlock still wakes us */
prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bd->lock);
io_schedule();
finish_wait(&bd->wq_done, &wait);
return ret;
unlock:
spin_unlock_irq(&bd->lock);
return ret;
}
/*
 * Copy the user-supplied CDB into the request and fill in cmd_len,
 * cmd_type and timeout from the sg_io_v4 header.  CDBs longer than
 * BLK_MAX_CDB get a kzalloc'd buffer; on error that buffer is freed by
 * the caller (see the rq->cmd != rq->__cmd check in bsg_map_hdr()'s
 * out path).  Non-SCSI_CMD subprotocols require CAP_SYS_RAWIO.
 */
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, struct bsg_device *bd,
fmode_t has_write_perm)
{
if (hdr->request_len > BLK_MAX_CDB) {
rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
if (!rq->cmd)
return -ENOMEM;
}
if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
hdr->request_len))
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
/* unprivileged users may only issue whitelisted commands */
if (blk_verify_command(rq->cmd, has_write_perm))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/*
 * fill in request structure
 */
rq->cmd_len = hdr->request_len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
/* clamp timeout: user value, then queue default, then global default */
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}
/*
* Check if sg_io_v4 from user is allowed and valid
*/
/*
 * Check if sg_io_v4 from user is allowed and valid: guard byte must be
 * 'Q' and only the SCSI protocol with the CMD/TRANSPORT subprotocols is
 * accepted.  *rw is always set from the transfer direction.
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    (hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD &&
	     hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT))
		ret = -EINVAL;

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
/*
* map sg_io_v4 to a request.
*/
/*
 * map sg_io_v4 to a request.
 *
 * Builds (and for bidirectional commands, pairs) block requests from the
 * user header, mapping the dout/din user buffers into bios.  Returns the
 * request or an ERR_PTR; on error all partially acquired resources
 * (oversized CDB buffer, both requests, mapped bio) are released here.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
u8 *sense)
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw;
unsigned int dxfer_len;
void *dxferp = NULL;
struct bsg_class_device *bcd = &q->bsg_dev;
/* if the LLD has been removed then the bsg_unregister_queue will
 * eventually be called and the class_dev was freed, so we can no
 * longer use this request_queue. Return no such address.
 */
if (!bcd->class_dev)
return ERR_PTR(-ENXIO);
dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
if (ret)
return ERR_PTR(ret);
/*
 * map scatter-gather elements separately and string them to request
 */
rq = blk_get_request(q, rw, GFP_KERNEL);
if (!rq)
return ERR_PTR(-ENOMEM);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
if (ret)
goto out;
/* write with a din buffer means a bidirectional command: needs a
 * second (read) request and QUEUE_FLAG_BIDI support */
if (rw == WRITE && hdr->din_xfer_len) {
if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
ret = -EOPNOTSUPP;
goto out;
}
next_rq = blk_get_request(q, READ, GFP_KERNEL);
if (!next_rq) {
ret = -ENOMEM;
goto out;
}
rq->next_rq = next_rq;
next_rq->cmd_type = rq->cmd_type;
dxferp = (void*)(unsigned long)hdr->din_xferp;
ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
hdr->din_xfer_len, GFP_KERNEL);
if (ret)
goto out;
}
if (hdr->dout_xfer_len) {
dxfer_len = hdr->dout_xfer_len;
dxferp = (void*)(unsigned long)hdr->dout_xferp;
} else if (hdr->din_xfer_len) {
dxfer_len = hdr->din_xfer_len;
dxferp = (void*)(unsigned long)hdr->din_xferp;
} else
dxfer_len = 0;
if (dxfer_len) {
ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
GFP_KERNEL);
if (ret)
goto out;
}
rq->sense = sense;
rq->sense_len = 0;
return rq;
out:
/* free an oversized CDB buffer allocated by blk_fill_sgv4_hdr_rq() */
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
blk_put_request(rq);
if (next_rq) {
blk_rq_unmap_user(next_rq->bio);
blk_put_request(next_rq);
}
return ERR_PTR(ret);
}
/*
* async completion call-back from the block layer, when scsi/ide/whatever
* calls end_that_request_last() on a request
*/
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 *
 * Moves the command from busy_list to done_list and wakes readers.
 * hdr.duration held the submit jiffies (set in bsg_add_command()) and is
 * converted to elapsed milliseconds here.
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
struct bsg_command *bc = rq->end_io_data;
struct bsg_device *bd = bc->bd;
unsigned long flags;
dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
bd->name, rq, bc, bc->bio, uptodate);
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
spin_lock_irqsave(&bd->lock, flags);
list_move_tail(&bc->list, &bd->done_list);
bd->done_cmds++;
spin_unlock_irqrestore(&bd->lock, flags);
wake_up(&bd->wq_done);
}
/*
* do final setup of a 'bc' and submit the matching 'rq' to the block
* layer for io
*/
/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
/* default is head insertion unless BSG_FLAG_Q_AT_TAIL is set */
int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
/*
 * add bc command to busy queue and submit rq for io
 */
bc->rq = rq;
bc->bio = rq->bio;
if (rq->next_rq)
bc->bidi_bio = rq->next_rq->bio;
/* stash submit time; bsg_rq_end_io() turns this into a duration */
bc->hdr.duration = jiffies;
spin_lock_irq(&bd->lock);
list_add_tail(&bc->list, &bd->busy_list);
spin_unlock_irq(&bd->lock);
dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
rq->end_io_data = bc;
blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
/*
 * Pop the oldest completed command off the done list, or return NULL
 * when nothing has completed yet.
 */
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *cmd;

	spin_lock_irq(&bd->lock);
	if (!bd->done_cmds) {
		spin_unlock_irq(&bd->lock);
		return NULL;
	}

	cmd = list_first_entry(&bd->done_list, struct bsg_command, list);
	list_del(&cmd->list);
	bd->done_cmds--;
	spin_unlock_irq(&bd->lock);

	return cmd;
}
/*
* Get a finished command from the done list
*/
/*
 * Get a finished command from the done list
 *
 * Blocks (interruptibly) until one is available unless BSG_F_BLOCK is
 * clear, in which case ERR_PTR(-EAGAIN) is returned immediately; a
 * signal yields ERR_PTR(-ERESTARTSYS).
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret;
do {
bc = bsg_next_done_cmd(bd);
if (bc)
break;
if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
bc = ERR_PTR(-EAGAIN);
break;
}
ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
if (ret) {
bc = ERR_PTR(-ERESTARTSYS);
break;
}
} while (1);
dprintk("%s: returning done %p\n", bd->name, bc);
return bc;
}
/*
 * Fill the user-visible sg_io_v4 output fields from a completed request,
 * copy out sense data, unmap the user buffers and release the request(s).
 * Returns 0 or a negative errno (copy failure or a negative rq->errors).
 */
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct bio *bio, struct bio *bidi_bio)
{
int ret = 0;
dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
/*
 * fill in all the output members
 */
hdr->device_status = rq->errors & 0xff;
hdr->transport_status = host_byte(rq->errors);
hdr->driver_status = driver_byte(rq->errors);
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
hdr->response_len = 0;
if (rq->sense_len && hdr->response) {
/* copy at most max_response_len bytes of sense data */
int len = min_t(unsigned int, hdr->max_response_len,
rq->sense_len);
ret = copy_to_user((void*)(unsigned long)hdr->response,
rq->sense, len);
if (!ret)
hdr->response_len = len;
else
ret = -EFAULT;
}
if (rq->next_rq) {
/* bidirectional: report residuals for both directions */
hdr->dout_resid = rq->resid_len;
hdr->din_resid = rq->next_rq->resid_len;
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
hdr->din_resid = rq->resid_len;
else
hdr->dout_resid = rq->resid_len;
/*
 * If the request generated a negative error number, return it
 * (providing we aren't already returning an error); if it's
 * just a protocol response (i.e. non negative), that gets
 * processed above.
 */
if (!ret && rq->errors < 0)
ret = rq->errors;
blk_rq_unmap_user(bio);
/* free an oversized CDB buffer, if one was allocated */
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
blk_put_request(rq);
return ret;
}
/*
 * Drain the device on close: wait for every in-flight command to finish,
 * then complete and free everything on the done list.  Returns the first
 * completion error seen, if any.
 */
static int bsg_complete_all_commands(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret, tret;
dprintk("%s: entered\n", bd->name);
/*
 * wait for all commands to complete
 */
ret = 0;
do {
ret = bsg_io_schedule(bd);
/*
 * look for -ENODATA specifically -- we'll sometimes get
 * -ERESTARTSYS when we've taken a signal, but we can't
 * return until we're done freeing the queue, so ignore
 * it. The signal will get handled when we're done freeing
 * the bsg_device.
 */
} while (ret != -ENODATA);
/*
 * discard done commands
 */
ret = 0;
do {
spin_lock_irq(&bd->lock);
if (!bd->queued_cmds) {
spin_unlock_irq(&bd->lock);
break;
}
spin_unlock_irq(&bd->lock);
bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc))
break;
tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
bc->bidi_bio);
/* keep the first error, but keep reaping regardless */
if (!ret)
ret = tret;
bsg_free_command(bc);
} while (1);
return ret;
}
/*
 * Core of read(): reap up to count/sizeof(sg_io_v4) completed commands,
 * copying each finalized header back to userspace.  count must be a
 * multiple of sizeof(struct sg_io_v4).  *bytes_read is advanced per
 * header copied; the 'iov' parameter is unused by the current caller.
 */
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
const struct iovec *iov, ssize_t *bytes_read)
{
struct bsg_command *bc;
int nr_commands, ret;
if (count % sizeof(struct sg_io_v4))
return -EINVAL;
ret = 0;
nr_commands = count / sizeof(struct sg_io_v4);
while (nr_commands) {
bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
break;
}
/*
 * this is the only case where we need to copy data back
 * after completing the request. so do that here,
 * bsg_complete_work() cannot do that for us
 */
ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
bc->bidi_bio);
if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
ret = -EFAULT;
/* command is freed even when the copy-out failed */
bsg_free_command(bc);
if (ret)
break;
buf += sizeof(struct sg_io_v4);
*bytes_read += sizeof(struct sg_io_v4);
nr_commands--;
}
return ret;
}
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
if (file->f_flags & O_NONBLOCK)
clear_bit(BSG_F_BLOCK, &bd->flags);
else
set_bit(BSG_F_BLOCK, &bd->flags);
}
/*
* Check if the error is a "real" error that we should return.
*/
/*
 * Check if the error is a "real" error that we should return.
 * -ENOSPC, -ENODATA and -EAGAIN are expected flow-control results and
 * are not treated as fatal.
 */
static inline int err_block_err(int ret)
{
	return ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN;
}
/*
 * read(): reap completed commands, returning the number of bytes of
 * sg_io_v4 headers copied out, or an error when nothing was read or a
 * fatal error occurred.
 */
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	/*
	 * Simplified from "!bytes_read || (bytes_read && err_block_err(ret))":
	 * the second bytes_read test was redundant (!A || (A && B) == !A || B).
	 */
	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}
/*
 * Core of write(): parse count/sizeof(sg_io_v4) headers from userspace,
 * build a request for each and queue it asynchronously.  count must be a
 * multiple of sizeof(struct sg_io_v4).  *bytes_written is advanced per
 * command queued; a partially consumed buffer plus an error return is
 * possible and resolved by the caller.
 */
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
size_t count, ssize_t *bytes_written,
fmode_t has_write_perm)
{
struct bsg_command *bc;
struct request *rq;
int ret, nr_commands;
if (count % sizeof(struct sg_io_v4))
return -EINVAL;
nr_commands = count / sizeof(struct sg_io_v4);
rq = NULL;
bc = NULL;
ret = 0;
while (nr_commands) {
struct request_queue *q = bd->queue;
bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
bc = NULL;
break;
}
if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
ret = -EFAULT;
break;
}
/*
 * get a request, fill in the blanks, and add to request queue
 */
rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
break;
}
/* ownership of bc and rq passes to the block layer here */
bsg_add_command(bd, q, bc, rq);
bc = NULL;
rq = NULL;
nr_commands--;
buf += sizeof(struct sg_io_v4);
*bytes_written += sizeof(struct sg_io_v4);
}
/* free a command that was allocated but never submitted */
if (bc)
bsg_free_command(bc);
return ret;
}
/*
 * write(): submit one or more sg_io_v4 commands.  Returns bytes consumed,
 * or an error when nothing was queued or a fatal error occurred.
 */
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors.  Simplified from
	 * "!bytes_written || (bytes_written && err_block_err(ret))" --
	 * the second bytes_written test was redundant.
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
/*
 * Allocate and initialise a zeroed bsg_device with the default queue
 * depth.  Returns NULL on allocation failure.
 */
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *dev = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);

	if (unlikely(!dev))
		return NULL;

	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->busy_list);
	INIT_LIST_HEAD(&dev->done_list);
	INIT_HLIST_NODE(&dev->dev_list);
	init_waitqueue_head(&dev->wq_free);
	init_waitqueue_head(&dev->wq_done);
	dev->max_queue = BSG_DEFAULT_CMDS;

	return dev;
}
/*
 * kref release for the per-queue bsg_class_device: call the LLD's
 * release hook (if one was registered) and drop the parent device
 * reference taken in bsg_register_queue().
 */
static void bsg_kref_release_function(struct kref *kref)
{
struct bsg_class_device *bcd =
container_of(kref, struct bsg_class_device, ref);
struct device *parent = bcd->parent;
if (bcd->release)
bcd->release(bcd->parent);
put_device(parent);
}
/*
 * Drop one reference on a bsg_device.  The last reference unhashes the
 * device, drains all outstanding commands (blocking) and frees it; the
 * bsg_class_device kref and (on final free) the queue reference are
 * dropped as well.
 */
static int bsg_put_device(struct bsg_device *bd)
{
int ret = 0, do_free;
struct request_queue *q = bd->queue;
mutex_lock(&bsg_mutex);
do_free = atomic_dec_and_test(&bd->ref_count);
if (!do_free) {
mutex_unlock(&bsg_mutex);
goto out;
}
hlist_del(&bd->dev_list);
mutex_unlock(&bsg_mutex);
dprintk("%s: tearing down\n", bd->name);
/*
 * close can always block
 */
set_bit(BSG_F_BLOCK, &bd->flags);
/*
 * correct error detection baddies here again. it's the responsibility
 * of the app to properly reap commands before close() if it wants
 * fool-proof error detection
 */
ret = bsg_complete_all_commands(bd);
kfree(bd);
out:
kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
if (do_free)
blk_put_queue(q);
return ret;
}
/*
 * Create a new bsg_device for this queue/minor, pin the queue, and hash
 * the device by minor.  Returns the device or an ERR_PTR.
 */
static struct bsg_device *bsg_add_device(struct inode *inode,
struct request_queue *rq,
struct file *file)
{
struct bsg_device *bd;
int ret;
#ifdef BSG_DEBUG
unsigned char buf[32];
#endif
ret = blk_get_queue(rq);
if (ret)
return ERR_PTR(-ENXIO);
bd = bsg_alloc_device();
if (!bd) {
blk_put_queue(rq);
return ERR_PTR(-ENOMEM);
}
bd->queue = rq;
bsg_set_block(bd, file);
atomic_set(&bd->ref_count, 1);
mutex_lock(&bsg_mutex);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
/* bd came from kzalloc, so copying sizeof(name)-1 bytes leaves the
 * final byte as a NUL terminator even if the source is longer */
strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
dprintk("bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);
mutex_unlock(&bsg_mutex);
return bd;
}
/*
 * Look up an already-open bsg_device for this queue in the minor hash,
 * taking a reference if found; returns NULL when none exists.
 */
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
struct bsg_device *bd;
struct hlist_node *entry;
mutex_lock(&bsg_mutex);
hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
atomic_inc(&bd->ref_count);
goto found;
}
}
bd = NULL;
found:
mutex_unlock(&bsg_mutex);
return bd;
}
/*
 * Resolve an open() to a bsg_device: find the registered class device
 * for this minor, reuse an existing bsg_device for the queue, or create
 * a new one.  Holds a bsg_class_device kref for the lifetime of the
 * returned device (dropped again on failure).
 */
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
struct bsg_device *bd;
struct bsg_class_device *bcd;
/*
 * find the class device
 */
mutex_lock(&bsg_mutex);
bcd = idr_find(&bsg_minor_idr, iminor(inode));
if (bcd)
kref_get(&bcd->ref);
mutex_unlock(&bsg_mutex);
if (!bcd)
return ERR_PTR(-ENODEV);
bd = __bsg_get_device(iminor(inode), bcd->queue);
if (bd)
return bd;
bd = bsg_add_device(inode, bcd->queue, file);
if (IS_ERR(bd))
kref_put(&bcd->ref, bsg_kref_release_function);
return bd;
}
/* open(): look up or create the per-queue bsg_device for this minor. */
static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}
/* release(): drop this file's reference; last close tears the device down. */
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;

	return bsg_put_device(bd);
}
/*
 * poll(): POLLIN|POLLRDNORM when completed commands are ready to read,
 * POLLOUT while there is still room to queue another command.
 */
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	/*
	 * Fixed inverted test: the original used '>=', reporting POLLOUT
	 * only once the queue was already full -- exactly backwards.
	 * (Matches upstream fix "bsg: fix bsg_poll() to return POLLOUT
	 * properly".)
	 */
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
/*
 * ioctl(): handles the bsg-private queue-depth ioctls, forwards the
 * classic SCSI/sg ioctls to scsi_cmd_ioctl(), and implements synchronous
 * SG_IO by mapping, executing and completing a request inline.
 */
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
int __user *uarg = (int __user *) arg;
int ret;
switch (cmd) {
/*
 * our own ioctls
 */
case SG_GET_COMMAND_Q:
return put_user(bd->max_queue, uarg);
case SG_SET_COMMAND_Q: {
int queue;
if (get_user(queue, uarg))
return -EFAULT;
if (queue < 1)
return -EINVAL;
spin_lock_irq(&bd->lock);
bd->max_queue = queue;
spin_unlock_irq(&bd->lock);
return 0;
}
/*
 * SCSI/sg ioctls
 */
case SG_GET_VERSION_NUM:
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
case SG_SET_TIMEOUT:
case SG_GET_TIMEOUT:
case SG_GET_RESERVED_SIZE:
case SG_SET_RESERVED_SIZE:
case SG_EMULATED_HOST:
case SCSI_IOCTL_SEND_COMMAND: {
/* NOTE: this inner uarg deliberately shadows the outer int pointer */
void __user *uarg = (void __user *) arg;
return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
}
case SG_IO: {
/* synchronous single-command path: map, execute, complete */
struct request *rq;
struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr;
int at_head;
u8 sense[SCSI_SENSE_BUFFERSIZE];
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT;
rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
if (IS_ERR(rq))
return PTR_ERR(rq);
bio = rq->bio;
if (rq->next_rq)
bidi_bio = rq->next_rq->bio;
at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
blk_execute_rq(bd->queue, NULL, rq, at_head);
ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
if (copy_to_user(uarg, &hdr, sizeof(hdr)))
return -EFAULT;
return ret;
}
/*
 * block device ioctls
 */
default:
#if 0
return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
return -ENOTTY;
#endif
}
}
/* File operations backing the /dev/bsg/<name> character device nodes. */
static const struct file_operations bsg_fops = {
.read = bsg_read,
.write = bsg_write,
.poll = bsg_poll,
.open = bsg_open,
.release = bsg_release,
.unlocked_ioctl = bsg_ioctl,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/*
 * Tear down the bsg node for a queue: remove the minor from the idr,
 * drop the sysfs link, unregister the class device and release the
 * registration's kref.  Safe to call on a queue that never registered
 * (class_dev is NULL).
 */
void bsg_unregister_queue(struct request_queue *q)
{
struct bsg_class_device *bcd = &q->bsg_dev;
if (!bcd->class_dev)
return;
mutex_lock(&bsg_mutex);
idr_remove(&bsg_minor_idr, bcd->minor);
if (q->kobj.sd)
sysfs_remove_link(&q->kobj, "bsg");
device_unregister(bcd->class_dev);
bcd->class_dev = NULL;
kref_put(&bcd->ref, bsg_kref_release_function);
mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
/*
 * Register a request queue with the bsg layer: allocate a minor via the
 * (pre-3.9) idr_pre_get/idr_get_new API, create the class device node
 * and a "bsg" sysfs link on the queue.  'release' is invoked with
 * 'parent' when the registration's last kref is dropped.  Returns 0 on
 * success (including the no-op case of a stacked queue) or a negative
 * errno, unwinding everything acquired on failure.
 */
int bsg_register_queue(struct request_queue *q, struct device *parent,
const char *name, void (*release)(struct device *))
{
struct bsg_class_device *bcd;
dev_t dev;
int ret, minor;
struct device *class_dev = NULL;
const char *devname;
if (name)
devname = name;
else
devname = dev_name(parent);
/*
 * we need a proper transport to send commands, not a stacked device
 */
if (!q->request_fn)
return 0;
bcd = &q->bsg_dev;
memset(bcd, 0, sizeof(*bcd));
mutex_lock(&bsg_mutex);
ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
if (!ret) {
ret = -ENOMEM;
goto unlock;
}
ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
if (ret < 0)
goto unlock;
if (minor >= BSG_MAX_DEVS) {
printk(KERN_ERR "bsg: too many bsg devices\n");
ret = -EINVAL;
goto remove_idr;
}
bcd->minor = minor;
bcd->queue = q;
/* parent reference is dropped in bsg_kref_release_function() */
bcd->parent = get_device(parent);
bcd->release = release;
kref_init(&bcd->ref);
dev = MKDEV(bsg_major, bcd->minor);
class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
if (IS_ERR(class_dev)) {
ret = PTR_ERR(class_dev);
goto put_dev;
}
bcd->class_dev = class_dev;
if (q->kobj.sd) {
ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
if (ret)
goto unregister_class_dev;
}
mutex_unlock(&bsg_mutex);
return 0;
unregister_class_dev:
device_unregister(class_dev);
put_dev:
put_device(parent);
remove_idr:
idr_remove(&bsg_minor_idr, minor);
unlock:
mutex_unlock(&bsg_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
static struct cdev bsg_cdev;
/* devtmpfs callback: place bsg nodes under the bsg/ subdirectory. */
static char *bsg_devnode(struct device *dev, mode_t *mode)
{
return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
static int __init bsg_init(void)
{
int ret, i;
dev_t devid;
bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
sizeof(struct bsg_command), 0, 0, NULL);
if (!bsg_cmd_cachep) {
printk(KERN_ERR "bsg: failed creating slab cache\n");
return -ENOMEM;
}
for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
INIT_HLIST_HEAD(&bsg_device_list[i]);
bsg_class = class_create(THIS_MODULE, "bsg");
if (IS_ERR(bsg_class)) {
ret = PTR_ERR(bsg_class);
goto destroy_kmemcache;
}
bsg_class->devnode = bsg_devnode;
ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
if (ret)
goto destroy_bsg_class;
bsg_major = MAJOR(devid);
cdev_init(&bsg_cdev, &bsg_fops);
ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
if (ret)
goto unregister_chrdev;
printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
" loaded (major %d)\n", bsg_major);
return 0;
unregister_chrdev:
unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
class_destroy(bsg_class);
destroy_kmemcache:
kmem_cache_destroy(bsg_cmd_cachep);
return ret;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");
device_initcall(bsg_init);
| gpl-2.0 |
k2wl/i9105Sammy | arch/arm/mach-imx/eukrea_mbimx27-baseboard.c | 2280 | 9387 | /*
* Copyright (C) 2009-2010 Eric Benard - eric@eukrea.com
*
* Based on pcm970-baseboard.c which is :
* Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/backlight.h>
#include <video/platform_lcd.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
#include <mach/iomux-mx27.h>
#include <mach/hardware.h>
#include <mach/audmux.h>
#include "devices-imx27.h"
static const int eukrea_mbimx27_pins[] __initconst = {
/* UART2 */
PE3_PF_UART2_CTS,
PE4_PF_UART2_RTS,
PE6_PF_UART2_TXD,
PE7_PF_UART2_RXD,
/* UART3 */
PE8_PF_UART3_TXD,
PE9_PF_UART3_RXD,
PE10_PF_UART3_CTS,
PE11_PF_UART3_RTS,
/* UART4 */
#if !defined(MACH_EUKREA_CPUIMX27_USEUART4)
PB26_AF_UART4_RTS,
PB28_AF_UART4_TXD,
PB29_AF_UART4_CTS,
PB31_AF_UART4_RXD,
#endif
/* SDHC1*/
PE18_PF_SD1_D0,
PE19_PF_SD1_D1,
PE20_PF_SD1_D2,
PE21_PF_SD1_D3,
PE22_PF_SD1_CMD,
PE23_PF_SD1_CLK,
/* display */
PA5_PF_LSCLK,
PA6_PF_LD0,
PA7_PF_LD1,
PA8_PF_LD2,
PA9_PF_LD3,
PA10_PF_LD4,
PA11_PF_LD5,
PA12_PF_LD6,
PA13_PF_LD7,
PA14_PF_LD8,
PA15_PF_LD9,
PA16_PF_LD10,
PA17_PF_LD11,
PA18_PF_LD12,
PA19_PF_LD13,
PA20_PF_LD14,
PA21_PF_LD15,
PA22_PF_LD16,
PA23_PF_LD17,
PA28_PF_HSYNC,
PA29_PF_VSYNC,
PA30_PF_CONTRAST,
PA31_PF_OE_ACD,
/* SPI1 */
PD29_PF_CSPI1_SCLK,
PD30_PF_CSPI1_MISO,
PD31_PF_CSPI1_MOSI,
/* SSI4 */
#if defined(CONFIG_SND_SOC_EUKREA_TLV320) \
|| defined(CONFIG_SND_SOC_EUKREA_TLV320_MODULE)
PC16_PF_SSI4_FS,
PC17_PF_SSI4_RXD | GPIO_PUEN,
PC18_PF_SSI4_TXD | GPIO_PUEN,
PC19_PF_SSI4_CLK,
#endif
};
static const uint32_t eukrea_mbimx27_keymap[] = {
KEY(0, 0, KEY_UP),
KEY(0, 1, KEY_DOWN),
KEY(1, 0, KEY_RIGHT),
KEY(1, 1, KEY_LEFT),
};
static const struct matrix_keymap_data
eukrea_mbimx27_keymap_data __initconst = {
.keymap = eukrea_mbimx27_keymap,
.keymap_size = ARRAY_SIZE(eukrea_mbimx27_keymap),
};
static struct gpio_led gpio_leds[] = {
{
.name = "led1",
.default_trigger = "heartbeat",
.active_low = 1,
.gpio = GPIO_PORTF | 16,
},
{
.name = "led2",
.default_trigger = "none",
.active_low = 1,
.gpio = GPIO_PORTF | 19,
},
};
static struct gpio_led_platform_data gpio_led_info = {
.leds = gpio_leds,
.num_leds = ARRAY_SIZE(gpio_leds),
};
static struct platform_device leds_gpio = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &gpio_led_info,
},
};
static struct imx_fb_videomode eukrea_mbimx27_modes[] = {
{
.mode = {
.name = "CMO-QVGA",
.refresh = 60,
.xres = 320,
.yres = 240,
.pixclock = 156000,
.hsync_len = 30,
.left_margin = 38,
.right_margin = 20,
.vsync_len = 3,
.upper_margin = 15,
.lower_margin = 4,
},
.pcr = 0xFAD08B80,
.bpp = 16,
}, {
.mode = {
.name = "DVI-VGA",
.refresh = 60,
.xres = 640,
.yres = 480,
.pixclock = 32000,
.hsync_len = 1,
.left_margin = 35,
.right_margin = 0,
.vsync_len = 1,
.upper_margin = 7,
.lower_margin = 0,
},
.pcr = 0xFA208B80,
.bpp = 16,
}, {
.mode = {
.name = "DVI-SVGA",
.refresh = 60,
.xres = 800,
.yres = 600,
.pixclock = 25000,
.hsync_len = 1,
.left_margin = 35,
.right_margin = 0,
.vsync_len = 1,
.upper_margin = 7,
.lower_margin = 0,
},
.pcr = 0xFA208B80,
.bpp = 16,
},
};
static const struct imx_fb_platform_data eukrea_mbimx27_fb_data __initconst = {
.mode = eukrea_mbimx27_modes,
.num_modes = ARRAY_SIZE(eukrea_mbimx27_modes),
.pwmr = 0x00A903FF,
.lscr1 = 0x00120300,
.dmacr = 0x00040060,
};
/* Backlight control: PE5 drives the backlight enable line (on/off only). */
static void eukrea_mbimx27_bl_set_intensity(int intensity)
{
	gpio_direction_output(GPIO_PORTE | 5, intensity ? 1 : 0);
}
static struct generic_bl_info eukrea_mbimx27_bl_info = {
.name = "eukrea_mbimx27-bl",
.max_intensity = 0xff,
.default_intensity = 0xff,
.set_bl_intensity = eukrea_mbimx27_bl_set_intensity,
};
static struct platform_device eukrea_mbimx27_bl_dev = {
.name = "generic-bl",
.id = 1,
.dev = {
.platform_data = &eukrea_mbimx27_bl_info,
},
};
/* LCD power control: PA25 gates the panel enable line. */
static void eukrea_mbimx27_lcd_power_set(struct plat_lcd_data *pd,
		unsigned int power)
{
	gpio_direction_output(GPIO_PORTA | 25, power ? 1 : 0);
}
static struct plat_lcd_data eukrea_mbimx27_lcd_power_data = {
.set_power = eukrea_mbimx27_lcd_power_set,
};
static struct platform_device eukrea_mbimx27_lcd_powerdev = {
.name = "platform-lcd",
.dev.platform_data = &eukrea_mbimx27_lcd_power_data,
};
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
#define ADS7846_PENDOWN (GPIO_PORTD | 25)
static void __maybe_unused ads7846_dev_init(void)
{
if (gpio_request(ADS7846_PENDOWN, "ADS7846 pendown") < 0) {
printk(KERN_ERR "can't get ads746 pen down GPIO\n");
return;
}
gpio_direction_input(ADS7846_PENDOWN);
}
static int ads7846_get_pendown_state(void)
{
return !gpio_get_value(ADS7846_PENDOWN);
}
static struct ads7846_platform_data ads7846_config __initdata = {
.get_pendown_state = ads7846_get_pendown_state,
.keep_vref_on = 1,
};
static struct spi_board_info __maybe_unused
eukrea_mbimx27_spi_board_info[] __initdata = {
[0] = {
.modalias = "ads7846",
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 1500000,
.irq = IRQ_GPIOD(25),
.platform_data = &ads7846_config,
.mode = SPI_MODE_2,
},
};
static int eukrea_mbimx27_spi_cs[] = {GPIO_PORTD | 28};
static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = {
.chipselect = eukrea_mbimx27_spi_cs,
.num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs),
};
static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
{
I2C_BOARD_INFO("tlv320aic23", 0x1a),
},
};
static struct platform_device *platform_devices[] __initdata = {
&leds_gpio,
};
static const struct imxmmc_platform_data sdhc_pdata __initconst = {
.dat3_card_detect = 1,
};
static const
struct imx_ssi_platform_data eukrea_mbimx27_ssi_pdata __initconst = {
.flags = IMX_SSI_DMA | IMX_SSI_USE_I2S_SLAVE,
};
/*
* system init for baseboard usage. Will be called by cpuimx27 init.
*
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
void __init eukrea_mbimx27_baseboard_init(void)
{
mxc_gpio_setup_multiple_pins(eukrea_mbimx27_pins,
ARRAY_SIZE(eukrea_mbimx27_pins), "MBIMX27");
#if defined(CONFIG_SND_SOC_EUKREA_TLV320) \
|| defined(CONFIG_SND_SOC_EUKREA_TLV320_MODULE)
/* SSI unit master I2S codec connected to SSI_PINS_4*/
mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
MXC_AUDMUX_V1_PCR_SYN |
MXC_AUDMUX_V1_PCR_TFSDIR |
MXC_AUDMUX_V1_PCR_TCLKDIR |
MXC_AUDMUX_V1_PCR_RFSDIR |
MXC_AUDMUX_V1_PCR_RCLKDIR |
MXC_AUDMUX_V1_PCR_TFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) |
MXC_AUDMUX_V1_PCR_RFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) |
MXC_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4)
);
mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR3_SSI_PINS_4,
MXC_AUDMUX_V1_PCR_SYN |
MXC_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0)
);
#endif
imx27_add_imx_uart1(&uart_pdata);
imx27_add_imx_uart2(&uart_pdata);
#if !defined(MACH_EUKREA_CPUIMX27_USEUART4)
imx27_add_imx_uart3(&uart_pdata);
#endif
imx27_add_imx_fb(&eukrea_mbimx27_fb_data);
imx27_add_mxc_mmc(0, &sdhc_pdata);
i2c_register_board_info(0, eukrea_mbimx27_i2c_devices,
ARRAY_SIZE(eukrea_mbimx27_i2c_devices));
imx27_add_imx_ssi(0, &eukrea_mbimx27_ssi_pdata);
#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
|| defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
/* ADS7846 Touchscreen controller init */
mxc_gpio_mode(GPIO_PORTD | 25 | GPIO_GPIO | GPIO_IN);
ads7846_dev_init();
#endif
/* SPI_CS0 init */
mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_OUT);
imx27_add_spi_imx0(&eukrea_mbimx27_spi0_data);
spi_register_board_info(eukrea_mbimx27_spi_board_info,
ARRAY_SIZE(eukrea_mbimx27_spi_board_info));
/* Leds configuration */
mxc_gpio_mode(GPIO_PORTF | 16 | GPIO_GPIO | GPIO_OUT);
mxc_gpio_mode(GPIO_PORTF | 19 | GPIO_GPIO | GPIO_OUT);
/* Backlight */
mxc_gpio_mode(GPIO_PORTE | 5 | GPIO_GPIO | GPIO_OUT);
gpio_request(GPIO_PORTE | 5, "backlight");
platform_device_register(&eukrea_mbimx27_bl_dev);
/* LCD Reset */
mxc_gpio_mode(GPIO_PORTA | 25 | GPIO_GPIO | GPIO_OUT);
gpio_request(GPIO_PORTA | 25, "lcd_enable");
platform_device_register(&eukrea_mbimx27_lcd_powerdev);
imx27_add_imx_keypad(&eukrea_mbimx27_keymap_data);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
}
| gpl-2.0 |
xuvw/linux | scripts/kconfig/lxdialog/checklist.c | 2536 | 8438 | /*
* checklist.c -- implements the checklist box
*
* ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
* Stuart Herbert - S.Herbert@sheffield.ac.uk: radiolist extension
* Alessandro Rubini - rubini@ipvvis.unipv.it: merged the two
* MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "dialog.h"
static int list_width, check_x, item_x;
/*
* Print list item
*/
/*
 * Print a single list row: tag marker "(X)"/"( )", then the item text,
 * highlighted when 'selected'.  Text is clipped to the list width.
 */
static void print_item(WINDOW * win, int choice, int selected)
{
	int i;
	char *list_item = malloc(list_width + 1);

	/*
	 * Guard against allocation failure: the original passed a NULL
	 * pointer straight into strncpy().  Skipping the redraw of this
	 * one row is the only sane degradation here.
	 */
	if (!list_item)
		return;

	/* strncpy does not terminate on truncation; do it explicitly */
	strncpy(list_item, item_str(), list_width - item_x);
	list_item[list_width - item_x] = '\0';

	/* Clear 'residue' of last item */
	wattrset(win, dlg.menubox.atr);
	wmove(win, choice, 0);
	for (i = 0; i < list_width; i++)
		waddch(win, ' ');

	wmove(win, choice, check_x);
	wattrset(win, selected ? dlg.check_selected.atr
		 : dlg.check.atr);
	/* items tagged ':' are plain labels and get no "(X)" marker */
	if (!item_is_tag(':'))
		wprintw(win, "(%c)", item_is_tag('X') ? 'X' : ' ');

	/* first character drawn in "tag" attribute, rest in "item" */
	wattrset(win, selected ? dlg.tag_selected.atr : dlg.tag.atr);
	mvwaddch(win, choice, item_x, list_item[0]);
	wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr);
	waddstr(win, list_item + 1);
	if (selected) {
		wmove(win, choice, check_x + 1);
		wrefresh(win);
	}
	free(list_item);
}
/*
* Print the scroll indicators.
*/
/*
 * Draw the up/down scroll indicators on the dialog border: an arrow with
 * "(-)"/"(+)" when more items exist in that direction, otherwise plain
 * horizontal border line.
 */
static void print_arrows(WINDOW * win, int choice, int item_no, int scroll,
	 int y, int x, int height)
{
	int n;

	/* top indicator: shown while items are scrolled off above */
	wmove(win, y, x);
	if (scroll > 0) {
		wattrset(win, dlg.uarrow.atr);
		waddch(win, ACS_UARROW);
		waddstr(win, "(-)");
	} else {
		wattrset(win, dlg.menubox.atr);
		for (n = 0; n < 4; n++)
			waddch(win, ACS_HLINE);
	}

	/* bottom indicator: shown while items remain below the window */
	wmove(win, y + height + 1, x);
	if ((height < item_no) && (scroll + choice < item_no - 1)) {
		wattrset(win, dlg.darrow.atr);
		waddch(win, ACS_DARROW);
		waddstr(win, "(+)");
	} else {
		wattrset(win, dlg.menubox_border.atr);
		for (n = 0; n < 4; n++)
			waddch(win, ACS_HLINE);
	}
}
/*
* Display the termination buttons
*/
/*
 * Draw the "Select" and " Help " buttons on the bottom row and park the
 * cursor on whichever one 'selected' indicates (0 = Select, 1 = Help).
 */
static void print_buttons(WINDOW * dialog, int height, int width, int selected)
{
	int btn_y = height - 2;
	int btn_x = width / 2 - 11;

	print_button(dialog, gettext("Select"), btn_y, btn_x, selected == 0);
	print_button(dialog, gettext(" Help "), btn_y, btn_x + 14, selected == 1);

	/* buttons are 14 columns apart; cursor sits just inside the label */
	wmove(dialog, btn_y, btn_x + 1 + 14 * selected);
	wrefresh(dialog);
}
/*
* Display a dialog box with a list of options that can be turned on or off
* in the style of radiolist (only one option turned on at a time).
*/
/*
 * Run a radiolist-style dialog.  Returns 0 if "Select" was chosen, 1 for
 * "Help", and the final key value (KEY_ESC or a resize-adjusted code) if
 * the dialog was cancelled.  Exactly one item ends up selected.
 */
int dialog_checklist(const char *title, const char *prompt, int height,
		     int width, int list_height)
{
	int i, x, y, box_x, box_y;
	int key = 0, button = 0, choice = 0, scroll = 0, max_choice;
	WINDOW *dialog, *list;

	/* which item to highlight: prefer the explicitly selected item,
	 * otherwise the one tagged 'X' */
	item_foreach() {
		if (item_is_tag('X'))
			choice = item_n();
		if (item_is_selected()) {
			choice = item_n();
			break;
		}
	}

do_resize:	/* re-entered after a KEY_RESIZE event */
	if (getmaxy(stdscr) < (height + CHECKLIST_HEIGTH_MIN))
		return -ERRDISPLAYTOOSMALL;
	if (getmaxx(stdscr) < (width + CHECKLIST_WIDTH_MIN))
		return -ERRDISPLAYTOOSMALL;

	/* number of items visible at once */
	max_choice = MIN(list_height, item_count());

	/* center dialog box on screen */
	x = (getmaxx(stdscr) - width) / 2;
	y = (getmaxy(stdscr) - height) / 2;

	draw_shadow(stdscr, y, x, height, width);

	dialog = newwin(height, width, y, x);
	keypad(dialog, TRUE);

	draw_box(dialog, 0, 0, height, width,
		 dlg.dialog.atr, dlg.border.atr);
	/* horizontal separator line above the button row */
	wattrset(dialog, dlg.border.atr);
	mvwaddch(dialog, height - 3, 0, ACS_LTEE);
	for (i = 0; i < width - 2; i++)
		waddch(dialog, ACS_HLINE);
	wattrset(dialog, dlg.dialog.atr);
	waddch(dialog, ACS_RTEE);

	print_title(dialog, title, width);

	wattrset(dialog, dlg.dialog.atr);
	print_autowrap(dialog, prompt, width - 2, 1, 3);

	list_width = width - 6;
	box_y = height - list_height - 5;
	box_x = (width - list_width) / 2 - 1;

	/* create new window for the list */
	list = subwin(dialog, list_height, list_width, y + box_y + 1,
		      x + box_x + 1);

	keypad(list, TRUE);

	/* draw a box around the list items */
	draw_box(dialog, box_y, box_x, list_height + 2, list_width + 2,
		 dlg.menubox_border.atr, dlg.menubox.atr);

	/* Find length of longest item in order to center checklist */
	check_x = 0;
	item_foreach()
		check_x = MAX(check_x, strlen(item_str()) + 4);
	check_x = MIN(check_x, list_width);

	check_x = (list_width - check_x) / 2;
	item_x = check_x + 4;

	/* scroll so the highlighted item is inside the visible window */
	if (choice >= list_height) {
		scroll = choice - list_height + 1;
		choice -= scroll;
	}

	/* Print the list */
	for (i = 0; i < max_choice; i++) {
		item_set(scroll + i);
		print_item(list, i, i == choice);
	}

	print_arrows(dialog, choice, item_count(), scroll,
		     box_y, box_x + check_x + 5, list_height);

	print_buttons(dialog, height, width, 0);

	wnoutrefresh(dialog);
	wnoutrefresh(list);
	doupdate();

	/* main key loop; exits when key settles on KEY_ESC */
	while (key != KEY_ESC) {
		key = wgetch(dialog);

		/* hotkey navigation: jump to first item whose text starts
		 * with the pressed key */
		for (i = 0; i < max_choice; i++) {
			item_set(i + scroll);
			if (toupper(key) == toupper(item_str()[0]))
				break;
		}

		if (i < max_choice || key == KEY_UP || key == KEY_DOWN ||
		    key == '+' || key == '-') {
			if (key == KEY_UP || key == '-') {
				if (!choice) {
					if (!scroll)
						continue;
					/* Scroll list down */
					if (list_height > 1) {
						/* De-highlight current first item */
						item_set(scroll);
						print_item(list, 0, FALSE);
						scrollok(list, TRUE);
						wscrl(list, -1);
						scrollok(list, FALSE);
					}
					scroll--;
					item_set(scroll);
					print_item(list, 0, TRUE);
					print_arrows(dialog, choice, item_count(),
						     scroll, box_y, box_x + check_x + 5, list_height);
					wnoutrefresh(dialog);
					wrefresh(list);
					continue;	/* wait for another key press */
				} else
					i = choice - 1;
			} else if (key == KEY_DOWN || key == '+') {
				if (choice == max_choice - 1) {
					if (scroll + choice >= item_count() - 1)
						continue;
					/* Scroll list up */
					if (list_height > 1) {
						/* De-highlight current last item before scrolling up */
						item_set(scroll + max_choice - 1);
						print_item(list,
							   max_choice - 1,
							   FALSE);
						scrollok(list, TRUE);
						wscrl(list, 1);
						scrollok(list, FALSE);
					}
					scroll++;
					item_set(scroll + max_choice - 1);
					print_item(list, max_choice - 1, TRUE);
					print_arrows(dialog, choice, item_count(),
						     scroll, box_y, box_x + check_x + 5, list_height);
					wnoutrefresh(dialog);
					wrefresh(list);
					continue;	/* wait for another key press */
				} else
					i = choice + 1;
			}
			if (i != choice) {
				/* De-highlight current item */
				item_set(scroll + choice);
				print_item(list, choice, FALSE);
				/* Highlight new item */
				choice = i;
				item_set(scroll + choice);
				print_item(list, choice, TRUE);
				wnoutrefresh(dialog);
				wrefresh(list);
			}
			continue;	/* wait for another key press */
		}
		switch (key) {
		case 'H':
		case 'h':
		case '?':
			button = 1;
			/* fall-through */
		case 'S':
		case 's':
		case ' ':
		case '\n':
			/* radiolist semantics: clear all, select highlighted */
			item_foreach()
				item_set_selected(0);
			item_set(scroll + choice);
			item_set_selected(1);
			delwin(list);
			delwin(dialog);
			return button;
		case TAB:
		case KEY_LEFT:
		case KEY_RIGHT:
			/* cycle focus between the two buttons */
			button = ((key == KEY_LEFT ? --button : ++button) < 0)
			    ? 1 : (button > 1 ? 0 : button);
			print_buttons(dialog, height, width, button);
			wrefresh(dialog);
			break;
		case 'X':
		case 'x':
			key = KEY_ESC;
			break;
		case KEY_ESC:
			key = on_key_esc(dialog);
			break;
		case KEY_RESIZE:
			/* tear everything down and rebuild at the new size */
			delwin(list);
			delwin(dialog);
			on_key_resize();
			goto do_resize;
		}

		/* Now, update everything... */
		doupdate();
	}
	delwin(list);
	delwin(dialog);
	return key;		/* ESC pressed */
}
| gpl-2.0 |
vikrant82/t320_kernel | drivers/staging/ste_rmi4/synaptics_i2c_rmi4_staging.c | 2792 | 35803 | /**
*
* Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
* Copyright (c) 2007-2010, Synaptics Incorporated
*
* Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* Copyright 2010 (c) ST-Ericsson AB
*/
/*
* This file is licensed under the GPL2 license.
*
*#############################################################################
* GPL
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
*#############################################################################
*/
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include "synaptics_i2c_rmi4_staging.h"
/* TODO: for multiple device support will need a per-device mutex */
#define DRIVER_NAME "synaptics_rmi4_i2c"
#define MAX_ERROR_REPORT 6
#define MAX_TOUCH_MAJOR 15
#define MAX_RETRY_COUNT 5
#define STD_QUERY_LEN 21
#define PAGE_LEN 2
#define DATA_BUF_LEN 32
#define BUF_LEN 37
#define QUERY_LEN 9
#define DATA_LEN 12
#define HAS_TAP 0x01
#define HAS_PALMDETECT 0x01
#define HAS_ROTATE 0x02
#define HAS_TAPANDHOLD 0x02
#define HAS_DOUBLETAP 0x04
#define HAS_EARLYTAP 0x08
#define HAS_RELEASE 0x08
#define HAS_FLICK 0x10
#define HAS_PRESS 0x20
#define HAS_PINCH 0x40
#define MASK_16BIT 0xFFFF
#define MASK_8BIT 0xFF
#define MASK_7BIT 0x7F
#define MASK_5BIT 0x1F
#define MASK_4BIT 0x0F
#define MASK_3BIT 0x07
#define MASK_2BIT 0x03
#define TOUCHPAD_CTRL_INTR 0x8
#define PDT_START_SCAN_LOCATION (0x00E9)
#define PDT_END_SCAN_LOCATION (0x000A)
#define PDT_ENTRY_SIZE (0x0006)
#define RMI4_NUMBER_OF_MAX_FINGERS (8)
#define SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM (0x11)
#define SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM (0x01)
/**
* struct synaptics_rmi4_fn_desc - contains the function descriptor information
* @query_base_addr: base address for query
* @cmd_base_addr: base address for command
* @ctrl_base_addr: base address for control
* @data_base_addr: base address for data
* @intr_src_count: count for the interrupt source
* @fn_number: function number
*
* This structure is used to gives the function descriptor information
* of the particular functionality.
*/
struct synaptics_rmi4_fn_desc {
	unsigned char query_base_addr;	/* base address for query registers */
	unsigned char cmd_base_addr;	/* base address for command registers */
	unsigned char ctrl_base_addr;	/* base address for control registers */
	unsigned char data_base_addr;	/* base address for data registers */
	unsigned char intr_src_count;	/* count for the interrupt source */
	unsigned char fn_number;	/* RMI4 function number */
};
/**
* struct synaptics_rmi4_fn - contains the function information
* @fn_number: function number
* @num_of_data_sources: number of data sources
* @num_of_data_points: number of fingers touched
* @size_of_data_register_block: data register block size
* @index_to_intr_reg: index for interrupt register
* @intr_mask: interrupt mask value
* @fn_desc: variable for function descriptor structure
* @link: linked list for function descriptors
*
* This structure gives information about the number of data sources and
* the number of data registers associated with the function.
*/
struct synaptics_rmi4_fn {
	unsigned char fn_number;		/* RMI4 function number */
	unsigned char num_of_data_sources;	/* number of data sources */
	unsigned char num_of_data_points;	/* max fingers supported */
	unsigned char size_of_data_register_block;	/* per-finger data size */
	unsigned char index_to_intr_reg;	/* index into interrupt status regs */
	unsigned char intr_mask;		/* interrupt mask for this function */
	struct synaptics_rmi4_fn_desc fn_desc;	/* copied function descriptor */
	struct list_head link;			/* entry in support_fn_list */
};
/**
* struct synaptics_rmi4_device_info - contains the rmi4 device information
* @version_major: protocol major version number
* @version_minor: protocol minor version number
* @manufacturer_id: manufacturer identification byte
* @product_props: product properties information
* @product_info: product info array
* @date_code: device manufacture date
* @tester_id: tester id array
* @serial_number: serial number for that device
* @product_id_string: product id for the device
* @support_fn_list: linked list for device information
*
* This structure gives information about the number of data sources and
* the number of data registers associated with the function.
*/
struct synaptics_rmi4_device_info {
	unsigned int version_major;		/* protocol major version */
	unsigned int version_minor;		/* protocol minor version */
	unsigned char manufacturer_id;		/* 1 == Synaptics */
	unsigned char product_props;		/* product properties byte */
	unsigned char product_info[2];		/* product info bytes */
	unsigned char date_code[3];		/* manufacture year/month/day */
	unsigned short tester_id;		/* tester id */
	unsigned short serial_number;		/* device serial number */
	unsigned char product_id_string[11];	/* 10-char product id + NUL */
	struct list_head support_fn_list;	/* detected RMI4 functions */
};
/**
* struct synaptics_rmi4_data - contains the rmi4 device data
* @rmi4_mod_info: structure variable for rmi4 device info
* @input_dev: pointer for input device
* @i2c_client: pointer for i2c client
* @board: constant pointer for touch platform data
* @fn_list_mutex: mutex for function list
* @rmi4_page_mutex: mutex for rmi4 page
* @current_page: variable for integer
* @number_of_interrupt_register: interrupt registers count
* @fn01_ctrl_base_addr: control base address for fn01
* @fn01_query_base_addr: query base address for fn01
* @fn01_data_base_addr: data base address for fn01
* @sensor_max_x: sensor maximum x value
* @sensor_max_y: sensor maximum y value
* @regulator: pointer to the regulator structure
* @wait: wait queue structure variable
* @touch_stopped: flag to stop the thread function
*
* This structure gives the device data information.
*/
struct synaptics_rmi4_data {
	struct synaptics_rmi4_device_info rmi4_mod_info;	/* device info */
	struct input_dev *input_dev;	/* input device we report through */
	struct i2c_client *i2c_client;	/* underlying i2c client */
	const struct synaptics_rmi4_platform_data *board;	/* platform data */
	struct mutex fn_list_mutex;	/* protects support_fn_list */
	struct mutex rmi4_page_mutex;	/* serializes page select + transfer */
	int current_page;		/* currently selected register page */
	unsigned int number_of_interrupt_register;	/* intr status reg count */
	unsigned short fn01_ctrl_base_addr;	/* Fn $01 control base */
	unsigned short fn01_query_base_addr;	/* Fn $01 query base */
	unsigned short fn01_data_base_addr;	/* Fn $01 data base */
	int sensor_max_x;		/* sensor maximum x value */
	int sensor_max_y;		/* sensor maximum y value */
	struct regulator *regulator;	/* "vdd" supply */
	wait_queue_head_t wait;		/* woken when touch_stopped flips */
	bool touch_stopped;		/* true: stop the irq polling loop */
};
/**
* synaptics_rmi4_set_page() - sets the page
* @pdata: pointer to synaptics_rmi4_data structure
* @address: set the address of the page
*
* This function is used to set the page and returns integer.
*/
/*
 * Select the RMI4 register page containing 'address'.  A cached copy of
 * the current page avoids redundant bus traffic.  Returns PAGE_LEN on
 * success (including the no-op case), otherwise the i2c error/short count.
 */
static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *pdata,
					unsigned int address)
{
	struct i2c_client *i2c = pdata->i2c_client;
	unsigned int page = (address >> 8) & MASK_8BIT;
	unsigned char buf[PAGE_LEN];
	int ret;

	/* already on the right page: nothing to send */
	if (page == pdata->current_page)
		return PAGE_LEN;

	buf[0] = MASK_8BIT;	/* page-select register (0xFF) */
	buf[1] = page;
	ret = i2c_master_send(i2c, buf, PAGE_LEN);
	if (ret != PAGE_LEN)
		dev_err(&i2c->dev, "%s:failed:%d\n", __func__, ret);
	else
		pdata->current_page = page;

	return ret;
}
/**
* synaptics_rmi4_i2c_block_read() - read the block of data
* @pdata: pointer to synaptics_rmi4_data structure
* @address: read the block of data from this offset
* @valp: pointer to a buffer containing the data to be read
* @size: number of bytes to read
*
* This function is to read the block of data and returns integer.
*/
/*
 * Read 'size' bytes from 'address' into 'valp'.  Returns the byte count
 * read on success; a short count or negative value indicates failure.
 * The page mutex is held for the whole page-select + read sequence, and
 * short/failed reads are retried up to MAX_RETRY_COUNT times.
 */
static int synaptics_rmi4_i2c_block_read(struct synaptics_rmi4_data *pdata,
						unsigned short address,
						unsigned char *valp, int size)
{
	int retval = 0;
	int retry_count = 0;
	int index;
	struct i2c_client *i2c = pdata->i2c_client;

	/* keep the page stable across the select + block read */
	mutex_lock(&(pdata->rmi4_page_mutex));
	retval = synaptics_rmi4_set_page(pdata, address);
	if (retval != PAGE_LEN)
		goto exit;
	index = address & MASK_8BIT;	/* register offset within the page */
retry:
	retval = i2c_smbus_read_i2c_block_data(i2c, index, size, valp);
	if (retval != size) {
		if (++retry_count == MAX_RETRY_COUNT)
			dev_err(&i2c->dev,
				"%s:address 0x%04x size %d failed:%d\n",
					__func__, address, size, retval);
		else {
			/* re-issue page select in case it was lost, retry */
			synaptics_rmi4_set_page(pdata, address);
			goto retry;
		}
	}
exit:
	mutex_unlock(&(pdata->rmi4_page_mutex));
	return retval;
}
/**
* synaptics_rmi4_i2c_byte_write() - write the single byte data
* @pdata: pointer to synaptics_rmi4_data structure
* @address: write the block of data from this offset
* @data: data to be write
*
* This function is to write the single byte data and returns integer.
*/
/*
 * Write a single byte 'data' to register 'address'.  Returns 1 on
 * success, -EIO on a failed/short i2c transfer, or the short page-select
 * result if the page could not be set.
 */
static int synaptics_rmi4_i2c_byte_write(struct synaptics_rmi4_data *pdata,
						unsigned short address,
						unsigned char data)
{
	struct i2c_client *i2c = pdata->i2c_client;
	unsigned char buf[2];
	int ret = 0;

	/* Can't have anyone else changing the page behind our backs */
	mutex_lock(&(pdata->rmi4_page_mutex));

	ret = synaptics_rmi4_set_page(pdata, address);
	if (ret != PAGE_LEN)
		goto out;

	buf[0] = address & MASK_8BIT;	/* register offset within the page */
	buf[1] = data;
	ret = i2c_master_send(pdata->i2c_client, buf, 2);
	/* Add in retry on writes only in certain error return values */
	if (ret == 2) {
		ret = 1;
	} else {
		dev_err(&i2c->dev, "%s:failed:%d\n", __func__, ret);
		ret = -EIO;
	}
out:
	mutex_unlock(&(pdata->rmi4_page_mutex));
	return ret;
}
/**
* synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device
* @pdata: pointer to synaptics_rmi4_data structure
* @rfi: pointer to synaptics_rmi4_fn structure
*
* This function calls to reports for the rmi4 touchpad device
*/
/*
 * Read the F11 2D finger data and report it to the input subsystem.
 * Returns the number of active touch points (0 on read failure).
 */
static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
						struct synaptics_rmi4_fn *rfi)
{
	/* number of touch points - fingers down in this case */
	int touch_count = 0;
	int finger;
	int fingers_supported;
	int finger_registers;
	int reg;
	int finger_shift;
	int finger_status;
	int retval;
	unsigned short data_base_addr;
	unsigned short data_offset;
	unsigned char data_reg_blk_size;
	unsigned char values[2];
	unsigned char data[DATA_LEN];
	int x[RMI4_NUMBER_OF_MAX_FINGERS];
	int y[RMI4_NUMBER_OF_MAX_FINGERS];
	int wx[RMI4_NUMBER_OF_MAX_FINGERS];
	int wy[RMI4_NUMBER_OF_MAX_FINGERS];
	struct i2c_client *client = pdata->i2c_client;

	/* get 2D sensor finger data */
	/*
	 * First get the finger status field - the size of the finger status
	 * field is determined by the number of finger supporte - 2 bits per
	 * finger, so the number of registers to read is:
	 * registerCount = ceil(numberOfFingers/4).
	 * Read the required number of registers and check each 2 bit field to
	 * determine if a finger is down:
	 *	00 = finger not present,
	 *	01 = finger present and data accurate,
	 *	10 = finger present but data may not be accurate,
	 *	11 = reserved for product use.
	 */
	fingers_supported = rfi->num_of_data_points;
	finger_registers = (fingers_supported + 3)/4;
	data_base_addr = rfi->fn_desc.data_base_addr;
	retval = synaptics_rmi4_i2c_block_read(pdata, data_base_addr, values,
							finger_registers);
	if (retval != finger_registers) {
		dev_err(&client->dev, "%s:read status registers failed\n",
								__func__);
		return 0;
	}
	/*
	 * For each finger present, read the proper number of registers
	 * to get absolute data.
	 */
	data_reg_blk_size = rfi->size_of_data_register_block;
	for (finger = 0; finger < fingers_supported; finger++) {
		/* determine which data byte the finger status is in */
		reg = finger/4;
		/* bit shift to get finger's status */
		finger_shift = (finger % 4) * 2;
		finger_status = (values[reg] >> finger_shift) & 3;
		/*
		 * if finger status indicates a finger is present then
		 * read the finger data and report it
		 */
		if (finger_status == 1 || finger_status == 2) {
			/* Read the finger data */
			data_offset = data_base_addr +
					((finger * data_reg_blk_size) +
					finger_registers);
			retval = synaptics_rmi4_i2c_block_read(pdata,
							data_offset, data,
							data_reg_blk_size);
			if (retval != data_reg_blk_size) {
				/*
				 * use dev_err like every other error path in
				 * this driver (was a bare printk(KERN_ERR))
				 */
				dev_err(&client->dev, "%s:read data failed\n",
								__func__);
				return 0;
			} else {
				/* X/Y are 12 bits: 8 MSBs + packed 4 LSBs */
				x[touch_count] =
					(data[0] << 4) | (data[2] & MASK_4BIT);
				y[touch_count] =
					(data[1] << 4) |
					((data[2] >> 4) & MASK_4BIT);
				wy[touch_count] =
						(data[3] >> 4) & MASK_4BIT;
				wx[touch_count] =
						(data[3] & MASK_4BIT);
				if (pdata->board->x_flip)
					x[touch_count] =
						pdata->sensor_max_x -
								x[touch_count];
				if (pdata->board->y_flip)
					y[touch_count] =
						pdata->sensor_max_y -
								y[touch_count];
			}
			/* number of active touch points */
			touch_count++;
		}
	}

	/* report to input subsystem */
	if (touch_count) {
		for (finger = 0; finger < touch_count; finger++) {
			input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR,
					max(wx[finger], wy[finger]));
			input_report_abs(pdata->input_dev, ABS_MT_POSITION_X,
								x[finger]);
			input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y,
								y[finger]);
			input_mt_sync(pdata->input_dev);
		}
	} else
		input_mt_sync(pdata->input_dev);

	/* sync after groups of events */
	input_sync(pdata->input_dev);
	/* return the number of touch points */
	return touch_count;
}
/**
* synaptics_rmi4_report_device() - reports the rmi4 device
* @pdata: pointer to synaptics_rmi4_data structure
* @rfi: pointer to synaptics_rmi4_fn
*
* This function is used to call the report function of the rmi4 device.
*/
/*
 * Dispatch an interrupting function to its report handler.  Only the
 * F11 touchpad function is supported; anything else logs an error (at
 * most MAX_ERROR_REPORT times).  Returns the touch-point count.
 */
static int synaptics_rmi4_report_device(struct synaptics_rmi4_data *pdata,
					struct synaptics_rmi4_fn *rfi)
{
	struct i2c_client *client = pdata->i2c_client;
	static int num_error_reports;
	int touch = 0;

	if (rfi->fn_number == SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
		touch = synpatics_rmi4_touchpad_report(pdata, rfi);
	} else {
		/* rate-limit complaints about unsupported functions */
		num_error_reports++;
		if (num_error_reports < MAX_ERROR_REPORT)
			dev_err(&client->dev, "%s:report not supported\n",
								__func__);
	}
	return touch;
}
/**
* synaptics_rmi4_sensor_report() - reports to input subsystem
* @pdata: pointer to synaptics_rmi4_data structure
*
* This function is used to reads in all data sources and reports
* them to the input subsystem.
*/
/*
 * Read the Fn $01 interrupt status registers and, for every function
 * whose interrupt bit is set, call its report handler.  Returns the
 * number of touch points reported (0 on read failure).
 */
static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *pdata)
{
	unsigned char intr_status[4];
	/* number of touch points - fingers or buttons */
	int touch = 0;
	/*
	 * was 'unsigned int': synaptics_rmi4_i2c_block_read() returns a
	 * signed int and can be negative on error, so store it as int
	 */
	int retval;
	struct synaptics_rmi4_fn *rfi;
	struct synaptics_rmi4_device_info *rmi;
	struct i2c_client *client = pdata->i2c_client;

	/*
	 * Get the interrupt status from the function $01
	 * control register+1 to find which source(s) were interrupting
	 * so we can read the data from the source(s) (2D sensor, buttons..)
	 */
	retval = synaptics_rmi4_i2c_block_read(pdata,
					pdata->fn01_data_base_addr + 1,
					intr_status,
					pdata->number_of_interrupt_register);
	if (retval != pdata->number_of_interrupt_register) {
		dev_err(&client->dev,
			"could not read interrupt status registers\n");
		return 0;
	}
	/*
	 * check each function that has data sources and if the interrupt for
	 * that triggered then call that RMI4 functions report() function to
	 * gather data and report it to the input subsystem
	 */
	rmi = &(pdata->rmi4_mod_info);
	list_for_each_entry(rfi, &rmi->support_fn_list, link) {
		if (rfi->num_of_data_sources) {
			if (intr_status[rfi->index_to_intr_reg] &
							rfi->intr_mask)
				touch = synaptics_rmi4_report_device(pdata,
									rfi);
		}
	}
	/* return the number of touch points */
	return touch;
}
/**
* synaptics_rmi4_irq() - thread function for rmi4 attention line
* @irq: irq value
* @data: void pointer
*
* This function is interrupt thread function. It just notifies the
* application layer that attention is required.
*/
/*
 * Threaded interrupt handler: keep draining sensor reports while fingers
 * are down, sleeping ~1 ms between polls, until nothing is touching or
 * the driver asks us to stop via touch_stopped.
 */
static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
{
	struct synaptics_rmi4_data *pdata = data;

	do {
		/* no active touch points: done servicing this interrupt */
		if (!synaptics_rmi4_sensor_report(pdata))
			break;
		/* brief pause, interruptible by a stop request */
		wait_event_timeout(pdata->wait, pdata->touch_stopped,
							msecs_to_jiffies(1));
	} while (!pdata->touch_stopped);

	return IRQ_HANDLED;
}
/**
* synpatics_rmi4_touchpad_detect() - detects the rmi4 touchpad device
* @pdata: pointer to synaptics_rmi4_data structure
* @rfi: pointer to synaptics_rmi4_fn structure
* @fd: pointer to synaptics_rmi4_fn_desc structure
* @interruptcount: count the number of interrupts
*
* This function calls to detects the rmi4 touchpad device
*/
static int synpatics_rmi4_touchpad_detect(struct synaptics_rmi4_data *pdata,
struct synaptics_rmi4_fn *rfi,
struct synaptics_rmi4_fn_desc *fd,
unsigned int interruptcount)
{
unsigned char queries[QUERY_LEN];
unsigned short intr_offset;
unsigned char abs_data_size;
unsigned char abs_data_blk_size;
unsigned char egr_0, egr_1;
unsigned int all_data_blk_size;
int has_pinch, has_flick, has_tap;
int has_tapandhold, has_doubletap;
int has_earlytap, has_press;
int has_palmdetect, has_rotate;
int has_rel;
int i;
int retval;
struct i2c_client *client = pdata->i2c_client;
rfi->fn_desc.query_base_addr = fd->query_base_addr;
rfi->fn_desc.data_base_addr = fd->data_base_addr;
rfi->fn_desc.intr_src_count = fd->intr_src_count;
rfi->fn_desc.fn_number = fd->fn_number;
rfi->fn_number = fd->fn_number;
rfi->num_of_data_sources = fd->intr_src_count;
rfi->fn_desc.ctrl_base_addr = fd->ctrl_base_addr;
rfi->fn_desc.cmd_base_addr = fd->cmd_base_addr;
/*
* need to get number of fingers supported, data size, etc.
* to be used when getting data since the number of registers to
* read depends on the number of fingers supported and data size.
*/
retval = synaptics_rmi4_i2c_block_read(pdata, fd->query_base_addr,
queries,
sizeof(queries));
if (retval != sizeof(queries)) {
dev_err(&client->dev, "%s:read function query registers\n",
__func__);
return retval;
}
/*
* 2D data sources have only 3 bits for the number of fingers
* supported - so the encoding is a bit weird.
*/
if ((queries[1] & MASK_3BIT) <= 4)
/* add 1 since zero based */
rfi->num_of_data_points = (queries[1] & MASK_3BIT) + 1;
else {
/*
* a value of 5 is up to 10 fingers - 6 and 7 are reserved
* (shouldn't get these i int retval;n a normal 2D source).
*/
if ((queries[1] & MASK_3BIT) == 5)
rfi->num_of_data_points = 10;
}
/* Need to get interrupt info for handling interrupts */
rfi->index_to_intr_reg = (interruptcount + 7)/8;
if (rfi->index_to_intr_reg != 0)
rfi->index_to_intr_reg -= 1;
/*
* loop through interrupts for each source in fn $11
* and or in a bit to the interrupt mask for each.
*/
intr_offset = interruptcount % 8;
rfi->intr_mask = 0;
for (i = intr_offset;
i < ((fd->intr_src_count & MASK_3BIT) + intr_offset); i++)
rfi->intr_mask |= 1 << i;
/* Size of just the absolute data for one finger */
abs_data_size = queries[5] & MASK_2BIT;
/* One each for X and Y, one for LSB for X & Y, one for W, one for Z */
abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
rfi->size_of_data_register_block = abs_data_blk_size;
/*
* need to determine the size of data to read - this depends on
* conditions such as whether Relative data is reported and if Gesture
* data is reported.
*/
egr_0 = queries[7];
egr_1 = queries[8];
/*
* Get info about what EGR data is supported, whether it has
* Relative data supported, etc.
*/
has_pinch = egr_0 & HAS_PINCH;
has_flick = egr_0 & HAS_FLICK;
has_tap = egr_0 & HAS_TAP;
has_earlytap = egr_0 & HAS_EARLYTAP;
has_press = egr_0 & HAS_PRESS;
has_rotate = egr_1 & HAS_ROTATE;
has_rel = queries[1] & HAS_RELEASE;
has_tapandhold = egr_0 & HAS_TAPANDHOLD;
has_doubletap = egr_0 & HAS_DOUBLETAP;
has_palmdetect = egr_1 & HAS_PALMDETECT;
/*
* Size of all data including finger status, absolute data for each
* finger, relative data and EGR data
*/
all_data_blk_size =
/* finger status, four fingers per register */
((rfi->num_of_data_points + 3) / 4) +
/* absolute data, per finger times number of fingers */
(abs_data_blk_size * rfi->num_of_data_points) +
/*
* two relative registers (if relative is being reported)
*/
2 * has_rel +
/*
* F11_2D_data8 is only present if the egr_0
* register is non-zero.
*/
!!(egr_0) +
/*
* F11_2D_data9 is only present if either egr_0 or
* egr_1 registers are non-zero.
*/
(egr_0 || egr_1) +
/*
* F11_2D_data10 is only present if EGR_PINCH or EGR_FLICK of
* egr_0 reports as 1.
*/
!!(has_pinch | has_flick) +
/*
* F11_2D_data11 and F11_2D_data12 are only present if
* EGR_FLICK of egr_0 reports as 1.
*/
2 * !!(has_flick);
return retval;
}
/**
* synpatics_rmi4_touchpad_config() - confiures the rmi4 touchpad device
* @pdata: pointer to synaptics_rmi4_data structure
* @rfi: pointer to synaptics_rmi4_fn structure
*
* This function calls to confiures the rmi4 touchpad device
*/
/*
 * Configure the F11 touchpad: read its query registers, then its control
 * registers, and cache the sensor's maximum X/Y coordinates.  Returns the
 * last block-read result (byte count on success, short/negative on error).
 */
int synpatics_rmi4_touchpad_config(struct synaptics_rmi4_data *pdata,
						struct synaptics_rmi4_fn *rfi)
{
	/*
	 * For the data source - print info and do any
	 * source specific configuration.
	 */
	unsigned char data[BUF_LEN];
	int retval = 0;
	struct i2c_client *client = pdata->i2c_client;

	/* Get and print some info about the data source... */
	/* To Query 2D devices we need to read from the address obtained
	 * from the function descriptor stored in the RMI function info.
	 */
	retval = synaptics_rmi4_i2c_block_read(pdata,
						rfi->fn_desc.query_base_addr,
						data, QUERY_LEN);
	if (retval != QUERY_LEN)
		dev_err(&client->dev, "%s:read query registers failed\n",
								__func__);
	else {
		retval = synaptics_rmi4_i2c_block_read(pdata,
						rfi->fn_desc.ctrl_base_addr,
						data, DATA_BUF_LEN);
		if (retval != DATA_BUF_LEN) {
			dev_err(&client->dev,
				"%s:read control registers failed\n",
								__func__);
			return retval;
		}
		/* Store these for use later */
		/*
		 * NOTE(review): max_x masks its low byte with MASK_8BIT but
		 * max_y uses MASK_5BIT - verify against the F11 control
		 * register map whether this asymmetry is intentional.
		 */
		pdata->sensor_max_x = ((data[6] & MASK_8BIT) << 0) |
						((data[7] & MASK_4BIT) << 8);
		pdata->sensor_max_y = ((data[8] & MASK_5BIT) << 0) |
						((data[9] & MASK_4BIT) << 8);
	}
	return retval;
}
/**
* synaptics_rmi4_i2c_query_device() - query the rmi4 device
* @pdata: pointer to synaptics_rmi4_data structure
*
* This function is used to query the rmi4 device.
*/
/*
 * Scan the Page Descriptor Table, build the list of supported RMI4
 * functions, read the Fn $01 product/identity queries, configure the
 * touchpad function and enable its interrupt sources.  Returns 0 on
 * success or a negative errno.
 */
static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
{
	int i;
	int retval;
	unsigned char std_queries[STD_QUERY_LEN];
	unsigned char intr_count = 0;	/* running total of interrupt sources */
	int data_sources = 0;
	unsigned int ctrl_offset;
	struct synaptics_rmi4_fn *rfi;
	struct synaptics_rmi4_fn_desc rmi_fd;
	struct synaptics_rmi4_device_info *rmi;
	struct i2c_client *client = pdata->i2c_client;

	/*
	 * init the physical drivers RMI module
	 * info list of functions
	 */
	INIT_LIST_HEAD(&pdata->rmi4_mod_info.support_fn_list);

	/*
	 * Read the Page Descriptor Table to determine what functions
	 * are present.  The PDT is scanned downwards from
	 * PDT_START_SCAN_LOCATION in PDT_ENTRY_SIZE steps.
	 */
	for (i = PDT_START_SCAN_LOCATION; i > PDT_END_SCAN_LOCATION;
						i -= PDT_ENTRY_SIZE) {
		retval = synaptics_rmi4_i2c_block_read(pdata, i,
						(unsigned char *)&rmi_fd,
						sizeof(rmi_fd));
		if (retval != sizeof(rmi_fd)) {
			/* failed to read next PDT entry */
			dev_err(&client->dev, "%s: read error\n", __func__);
			return -EIO;
		}
		rfi = NULL;
		if (rmi_fd.fn_number) {
			switch (rmi_fd.fn_number & MASK_8BIT) {
			case SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM:
				/* Fn $01: remember its base addresses */
				pdata->fn01_query_base_addr =
						rmi_fd.query_base_addr;
				pdata->fn01_ctrl_base_addr =
						rmi_fd.ctrl_base_addr;
				pdata->fn01_data_base_addr =
						rmi_fd.data_base_addr;
				break;
			case SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM:
				/* Fn $11: allocate info and probe it */
				if (rmi_fd.intr_src_count) {
					rfi = kmalloc(sizeof(*rfi),
								GFP_KERNEL);
					if (!rfi) {
						dev_err(&client->dev,
							"%s:kmalloc failed\n",
							__func__);
						return -ENOMEM;
					}
					retval = synpatics_rmi4_touchpad_detect
								(pdata, rfi,
								&rmi_fd,
								intr_count);
					if (retval < 0) {
						kfree(rfi);
						return retval;
					}
				}
				break;
			}
			/* interrupt count for next iteration */
			intr_count += (rmi_fd.intr_src_count & MASK_3BIT);
			/*
			 * We only want to add functions to the list
			 * that have data associated with them.
			 */
			if (rfi && rmi_fd.intr_src_count) {
				/* link this function info to the RMI module */
				mutex_lock(&(pdata->fn_list_mutex));
				list_add_tail(&rfi->link,
					&pdata->rmi4_mod_info.support_fn_list);
				mutex_unlock(&(pdata->fn_list_mutex));
			}
		} else {
			/*
			 * A zero in the function number
			 * signals the end of the PDT
			 */
			dev_dbg(&client->dev,
				"%s:end of PDT\n", __func__);
			break;
		}
	}
	/*
	 * calculate the interrupt register count - used in the
	 * ISR to read the correct number of interrupt registers
	 */
	pdata->number_of_interrupt_register = (intr_count + 7) / 8;
	/*
	 * Function $01 will be used to query the product properties,
	 * and product ID so we had to read the PDT above first to get
	 * the Fn $01 query address and prior to filling in the product
	 * info. NOTE: Even an unflashed device will still have FN $01.
	 */
	/* Load up the standard queries and get the RMI4 module info */
	retval = synaptics_rmi4_i2c_block_read(pdata,
					pdata->fn01_query_base_addr,
					std_queries,
					sizeof(std_queries));
	if (retval != sizeof(std_queries)) {
		dev_err(&client->dev, "%s:Failed reading queries\n",
							__func__);
		return -EIO;
	}
	/* Currently supported RMI version is 4.0 */
	pdata->rmi4_mod_info.version_major = 4;
	pdata->rmi4_mod_info.version_minor = 0;
	/*
	 * get manufacturer id, product_props, product info,
	 * date code, tester id, serial num and product id (name)
	 */
	pdata->rmi4_mod_info.manufacturer_id = std_queries[0];
	pdata->rmi4_mod_info.product_props = std_queries[1];
	pdata->rmi4_mod_info.product_info[0] = std_queries[2];
	pdata->rmi4_mod_info.product_info[1] = std_queries[3];
	/* year - 2001-2032 */
	pdata->rmi4_mod_info.date_code[0] = std_queries[4] & MASK_5BIT;
	/* month - 1-12 */
	pdata->rmi4_mod_info.date_code[1] = std_queries[5] & MASK_4BIT;
	/* day - 1-31 */
	pdata->rmi4_mod_info.date_code[2] = std_queries[6] & MASK_5BIT;
	pdata->rmi4_mod_info.tester_id = ((std_queries[7] & MASK_7BIT) << 8) |
						(std_queries[8] & MASK_7BIT);
	pdata->rmi4_mod_info.serial_number =
		((std_queries[9] & MASK_7BIT) << 8) |
		(std_queries[10] & MASK_7BIT);
	memcpy(pdata->rmi4_mod_info.product_id_string, &std_queries[11], 10);

	/* Check if this is a Synaptics device - report if not. */
	if (pdata->rmi4_mod_info.manufacturer_id != 1)
		dev_err(&client->dev, "%s: non-Synaptics mfg id:%d\n",
			__func__, pdata->rmi4_mod_info.manufacturer_id);

	list_for_each_entry(rfi, &pdata->rmi4_mod_info.support_fn_list, link)
		data_sources += rfi->num_of_data_sources;
	if (data_sources) {
		rmi = &(pdata->rmi4_mod_info);
		/* configure each data-bearing function and unmask its irq */
		list_for_each_entry(rfi, &rmi->support_fn_list, link) {
			if (rfi->num_of_data_sources) {
				if (rfi->fn_number ==
					SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
					retval = synpatics_rmi4_touchpad_config
								(pdata, rfi);
					if (retval < 0)
						return retval;
				} else
					dev_err(&client->dev,
						"%s:fn_number not supported\n",
								__func__);
				/*
				 * Turn on interrupts for this
				 * function's data sources.
				 */
				ctrl_offset = pdata->fn01_ctrl_base_addr + 1 +
							rfi->index_to_intr_reg;
				retval = synaptics_rmi4_i2c_byte_write(pdata,
							ctrl_offset,
							rfi->intr_mask);
				if (retval < 0)
					return retval;
			}
		}
	}
	return 0;
}
/**
 * synaptics_rmi4_probe() - Initialize the i2c-client touchscreen driver
 * @client: i2c client structure pointer
 * @dev_id: i2c device id pointer
 *
 * This function will allocate and initialize the instance
 * data and request the irq and set the instance data as the clients
 * platform data then register the physical driver which will do a scan of
 * the rmi4 Physical Device Table and enumerate any rmi4 functions that
 * have data sources associated with them.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit synaptics_rmi4_probe
			(struct i2c_client *client, const struct i2c_device_id *dev_id)
{
	int retval;
	unsigned char intr_status[4];
	struct synaptics_rmi4_data *rmi4_data;
	const struct synaptics_rmi4_platform_data *platformdata =
						client->dev.platform_data;

	if (!i2c_check_functionality(client->adapter,
					I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev, "i2c smbus byte data not supported\n");
		return -EIO;
	}

	if (!platformdata) {
		dev_err(&client->dev, "%s: no platform data\n", __func__);
		return -EINVAL;
	}

	/*
	 * Allocate and initialize the instance data for this client.
	 * A single instance is sufficient; the previous
	 * "sizeof(struct synaptics_rmi4_data) * 2" allocated twice the
	 * required memory for no reason.
	 */
	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
	if (!rmi4_data) {
		dev_err(&client->dev, "%s: no memory allocated\n", __func__);
		return -ENOMEM;
	}

	rmi4_data->input_dev = input_allocate_device();
	if (rmi4_data->input_dev == NULL) {
		dev_err(&client->dev, "%s:input device alloc failed\n",
						__func__);
		retval = -ENOMEM;
		goto err_input;
	}

	rmi4_data->regulator = regulator_get(&client->dev, "vdd");
	if (IS_ERR(rmi4_data->regulator)) {
		dev_err(&client->dev, "%s:get regulator failed\n",
						__func__);
		retval = PTR_ERR(rmi4_data->regulator);
		goto err_get_regulator;
	}

	retval = regulator_enable(rmi4_data->regulator);
	if (retval < 0) {
		dev_err(&client->dev, "%s:regulator enable failed\n",
						__func__);
		goto err_regulator_enable;
	}

	init_waitqueue_head(&rmi4_data->wait);
	/*
	 * Copy i2c_client pointer into the instance data for
	 * later use in rmi4_read, rmi4_write, etc.
	 */
	rmi4_data->i2c_client = client;
	/* Force a page select on the first register access */
	rmi4_data->current_page = MASK_16BIT;
	rmi4_data->board = platformdata;
	rmi4_data->touch_stopped = false;

	/* init the mutexes for maintain the lists */
	mutex_init(&(rmi4_data->fn_list_mutex));
	mutex_init(&(rmi4_data->rmi4_page_mutex));

	/*
	 * Register physical driver - this will call the detect function that
	 * will then scan the device and determine the supported
	 * rmi4 functions.
	 */
	retval = synaptics_rmi4_i2c_query_device(rmi4_data);
	if (retval) {
		dev_err(&client->dev, "%s: rmi4 query device failed\n",
						__func__);
		goto err_query_dev;
	}

	/* Store the instance data in the i2c_client */
	i2c_set_clientdata(client, rmi4_data);

	/* Initialize the input device parameters */
	rmi4_data->input_dev->name = DRIVER_NAME;
	rmi4_data->input_dev->phys = "Synaptics_Clearpad";
	rmi4_data->input_dev->id.bustype = BUS_I2C;
	rmi4_data->input_dev->dev.parent = &client->dev;
	input_set_drvdata(rmi4_data->input_dev, rmi4_data);

	/* Report sync, key and absolute (multi-touch) events */
	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
	input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, 0,
					rmi4_data->sensor_max_x, 0, 0);
	input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
					rmi4_data->sensor_max_y, 0, 0);
	input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
					MAX_TOUCH_MAJOR, 0, 0);

	/*
	 * Clear any pending interrupts before requesting the IRQ.
	 * Best effort only - the return value is deliberately ignored
	 * since a stale interrupt is harmless once the handler runs.
	 */
	synaptics_rmi4_i2c_block_read(rmi4_data,
			rmi4_data->fn01_data_base_addr + 1, intr_status,
			rmi4_data->number_of_interrupt_register);

	retval = request_threaded_irq(platformdata->irq_number, NULL,
					synaptics_rmi4_irq,
					platformdata->irq_type,
					DRIVER_NAME, rmi4_data);
	if (retval) {
		dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
				__func__, platformdata->irq_number);
		goto err_query_dev;
	}

	retval = input_register_device(rmi4_data->input_dev);
	if (retval) {
		dev_err(&client->dev, "%s:input register failed\n", __func__);
		goto err_free_irq;
	}

	return retval;

err_free_irq:
	free_irq(platformdata->irq_number, rmi4_data);
err_query_dev:
	regulator_disable(rmi4_data->regulator);
err_regulator_enable:
	regulator_put(rmi4_data->regulator);
err_get_regulator:
	input_free_device(rmi4_data->input_dev);
	rmi4_data->input_dev = NULL;
err_input:
	kfree(rmi4_data);
	return retval;
}
/**
 * synaptics_rmi4_remove() - Removes the i2c-client touchscreen driver
 * @client: i2c client structure pointer
 *
 * This function uses to remove the i2c-client
 * touchscreen driver and returns integer.
 *
 * Teardown order matters: the stop flag and wakeup go first so the IRQ
 * thread exits cleanly, then the IRQ is freed before the input device is
 * unregistered so no handler can touch a dead device.
 */
static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
{
	struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	/* Tell the IRQ thread to stop and wake any waiter on the queue */
	rmi4_data->touch_stopped = true;
	wake_up(&rmi4_data->wait);
	/* No interrupt handler may run past this point */
	free_irq(pdata->irq_number, rmi4_data);
	input_unregister_device(rmi4_data->input_dev);
	/* Power down and release the supply, then free the instance data */
	regulator_disable(rmi4_data->regulator);
	regulator_put(rmi4_data->regulator);
	kfree(rmi4_data);
	return 0;
}
#ifdef CONFIG_PM
/**
 * synaptics_rmi4_suspend() - suspend the touch screen controller
 * @dev: pointer to device structure
 *
 * Stops touch reporting, masks the touchpad interrupt in the controller
 * and powers down the supply regulator.
 *
 * Returns 0 on success or a negative errno on I2C failure.
 */
static int synaptics_rmi4_suspend(struct device *dev)
{
	/* Touch sleep mode */
	int retval;
	/*
	 * The block read below transfers number_of_interrupt_register
	 * bytes; the probe path sizes this buffer as 4 bytes for the
	 * same read, so a single byte here would overflow the stack.
	 */
	unsigned char intr_status[4];
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	rmi4_data->touch_stopped = true;
	disable_irq(pdata->irq_number);

	/* Read (and thereby clear) the pending interrupt status */
	retval = synaptics_rmi4_i2c_block_read(rmi4_data,
				rmi4_data->fn01_data_base_addr + 1,
				intr_status,
				rmi4_data->number_of_interrupt_register);
	if (retval < 0)
		return retval;

	/* Mask the touchpad interrupt source */
	retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
				rmi4_data->fn01_ctrl_base_addr + 1,
				(intr_status[0] & ~TOUCHPAD_CTRL_INTR));
	if (retval < 0)
		return retval;

	regulator_disable(rmi4_data->regulator);
	return 0;
}
/**
 * synaptics_rmi4_resume() - resume the touch screen controller
 * @dev: pointer to device structure
 *
 * Re-enables the supply regulator and the IRQ, restarts touch
 * reporting and unmasks the touchpad interrupt in the controller.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int synaptics_rmi4_resume(struct device *dev)
{
	int retval;
	/*
	 * Sized to match the probe path: the block read transfers
	 * number_of_interrupt_register bytes, so a single byte would
	 * overflow the stack.
	 */
	unsigned char intr_status[4];
	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;

	/* regulator_enable() is __must_check; bail out if power fails */
	retval = regulator_enable(rmi4_data->regulator);
	if (retval < 0)
		return retval;

	enable_irq(pdata->irq_number);
	rmi4_data->touch_stopped = false;

	/* Read (and thereby clear) any interrupt latched while suspended */
	retval = synaptics_rmi4_i2c_block_read(rmi4_data,
				rmi4_data->fn01_data_base_addr + 1,
				intr_status,
				rmi4_data->number_of_interrupt_register);
	if (retval < 0)
		return retval;

	/* Unmask the touchpad interrupt source */
	retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
				rmi4_data->fn01_ctrl_base_addr + 1,
				(intr_status[0] | TOUCHPAD_CTRL_INTR));
	if (retval < 0)
		return retval;

	return 0;
}
/* Legacy system sleep callbacks; no runtime PM is implemented */
static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
	.suspend = synaptics_rmi4_suspend,
	.resume = synaptics_rmi4_resume,
};
#endif
/* I2C device ids served by this driver (terminated by an empty entry) */
static const struct i2c_device_id synaptics_rmi4_id_table[] = {
	{ DRIVER_NAME, 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
/* I2C driver description; PM callbacks are wired up only under CONFIG_PM */
static struct i2c_driver synaptics_rmi4_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &synaptics_rmi4_dev_pm_ops,
#endif
	},
	.probe = synaptics_rmi4_probe,
	.remove = __devexit_p(synaptics_rmi4_remove),
	.id_table = synaptics_rmi4_id_table,
};
/**
 * synaptics_rmi4_init() - Initialize the touchscreen driver
 *
 * Module entry point: registers the I2C driver with the core.
 *
 * Returns the result of i2c_add_driver() (0 on success,
 * negative errno otherwise).
 */
static int __init synaptics_rmi4_init(void)
{
	int ret;

	ret = i2c_add_driver(&synaptics_rmi4_driver);
	return ret;
}
/**
 * synaptics_rmi4_exit() - De-initialize the touchscreen driver
 *
 * Module exit point: unregisters the I2C driver from the core.
 */
static void __exit synaptics_rmi4_exit(void)
{
	struct i2c_driver *drv = &synaptics_rmi4_driver;

	i2c_del_driver(drv);
}
/* Module entry/exit hooks and module metadata */
module_init(synaptics_rmi4_init);
module_exit(synaptics_rmi4_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("naveen.gaddipati@stericsson.com, js.ha@stericsson.com");
MODULE_DESCRIPTION("synaptics rmi4 i2c touch Driver");
MODULE_ALIAS("i2c:synaptics_rmi4_ts");
| gpl-2.0 |
targetnull/nkvm | sound/soc/codecs/wm5100-tables.c | 4840 | 64689 | /*
* wm5100-tables.c -- WM5100 ALSA SoC Audio driver data
*
* Copyright 2011-2 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "wm5100.h"
bool wm5100_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case WM5100_SOFTWARE_RESET:
case WM5100_DEVICE_REVISION:
case WM5100_FX_CTRL:
case WM5100_INTERRUPT_STATUS_1:
case WM5100_INTERRUPT_STATUS_2:
case WM5100_INTERRUPT_STATUS_3:
case WM5100_INTERRUPT_STATUS_4:
case WM5100_INTERRUPT_RAW_STATUS_2:
case WM5100_INTERRUPT_RAW_STATUS_3:
case WM5100_INTERRUPT_RAW_STATUS_4:
case WM5100_OUTPUT_STATUS_1:
case WM5100_OUTPUT_STATUS_2:
case WM5100_INPUT_ENABLES_STATUS:
case WM5100_MIC_DETECT_3:
return 1;
default:
if ((reg >= WM5100_DSP1_PM_0 && reg <= WM5100_DSP1_PM_1535) ||
(reg >= WM5100_DSP1_ZM_0 && reg <= WM5100_DSP1_ZM_2047) ||
(reg >= WM5100_DSP1_DM_0 && reg <= WM5100_DSP1_DM_511) ||
(reg >= WM5100_DSP2_PM_0 && reg <= WM5100_DSP2_PM_1535) ||
(reg >= WM5100_DSP2_ZM_0 && reg <= WM5100_DSP2_ZM_2047) ||
(reg >= WM5100_DSP2_DM_0 && reg <= WM5100_DSP2_DM_511) ||
(reg >= WM5100_DSP3_PM_0 && reg <= WM5100_DSP3_PM_1535) ||
(reg >= WM5100_DSP3_ZM_0 && reg <= WM5100_DSP3_ZM_2047) ||
(reg >= WM5100_DSP3_DM_0 && reg <= WM5100_DSP3_DM_511))
return 1;
else
return 0;
}
}
/*
 * wm5100_readable_register() - report whether a register may be read
 * @dev: device (unused)
 * @reg: register address
 *
 * Returns true for every documented control/status register and for the
 * DSP program (PM), coefficient (ZM) and data (DM) memory ranges,
 * false for any other address.
 *
 * The function has a bool return type, so return true/false rather
 * than the integer literals 1/0.
 */
bool wm5100_readable_register(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case WM5100_SOFTWARE_RESET:
	case WM5100_DEVICE_REVISION:
	case WM5100_CTRL_IF_1:
	case WM5100_TONE_GENERATOR_1:
	case WM5100_PWM_DRIVE_1:
	case WM5100_PWM_DRIVE_2:
	case WM5100_PWM_DRIVE_3:
	case WM5100_CLOCKING_1:
	case WM5100_CLOCKING_3:
	case WM5100_CLOCKING_4:
	case WM5100_CLOCKING_5:
	case WM5100_CLOCKING_6:
	case WM5100_CLOCKING_7:
	case WM5100_CLOCKING_8:
	case WM5100_ASRC_ENABLE:
	case WM5100_ASRC_STATUS:
	case WM5100_ASRC_RATE1:
	case WM5100_ISRC_1_CTRL_1:
	case WM5100_ISRC_1_CTRL_2:
	case WM5100_ISRC_2_CTRL1:
	case WM5100_ISRC_2_CTRL_2:
	case WM5100_FLL1_CONTROL_1:
	case WM5100_FLL1_CONTROL_2:
	case WM5100_FLL1_CONTROL_3:
	case WM5100_FLL1_CONTROL_5:
	case WM5100_FLL1_CONTROL_6:
	case WM5100_FLL1_EFS_1:
	case WM5100_FLL2_CONTROL_1:
	case WM5100_FLL2_CONTROL_2:
	case WM5100_FLL2_CONTROL_3:
	case WM5100_FLL2_CONTROL_5:
	case WM5100_FLL2_CONTROL_6:
	case WM5100_FLL2_EFS_1:
	case WM5100_MIC_CHARGE_PUMP_1:
	case WM5100_MIC_CHARGE_PUMP_2:
	case WM5100_HP_CHARGE_PUMP_1:
	case WM5100_LDO1_CONTROL:
	case WM5100_MIC_BIAS_CTRL_1:
	case WM5100_MIC_BIAS_CTRL_2:
	case WM5100_MIC_BIAS_CTRL_3:
	case WM5100_ACCESSORY_DETECT_MODE_1:
	case WM5100_HEADPHONE_DETECT_1:
	case WM5100_HEADPHONE_DETECT_2:
	case WM5100_MIC_DETECT_1:
	case WM5100_MIC_DETECT_2:
	case WM5100_MIC_DETECT_3:
	case WM5100_MISC_CONTROL:
	case WM5100_INPUT_ENABLES:
	case WM5100_INPUT_ENABLES_STATUS:
	case WM5100_IN1L_CONTROL:
	case WM5100_IN1R_CONTROL:
	case WM5100_IN2L_CONTROL:
	case WM5100_IN2R_CONTROL:
	case WM5100_IN3L_CONTROL:
	case WM5100_IN3R_CONTROL:
	case WM5100_IN4L_CONTROL:
	case WM5100_IN4R_CONTROL:
	case WM5100_RXANC_SRC:
	case WM5100_INPUT_VOLUME_RAMP:
	case WM5100_ADC_DIGITAL_VOLUME_1L:
	case WM5100_ADC_DIGITAL_VOLUME_1R:
	case WM5100_ADC_DIGITAL_VOLUME_2L:
	case WM5100_ADC_DIGITAL_VOLUME_2R:
	case WM5100_ADC_DIGITAL_VOLUME_3L:
	case WM5100_ADC_DIGITAL_VOLUME_3R:
	case WM5100_ADC_DIGITAL_VOLUME_4L:
	case WM5100_ADC_DIGITAL_VOLUME_4R:
	case WM5100_OUTPUT_ENABLES_2:
	case WM5100_OUTPUT_STATUS_1:
	case WM5100_OUTPUT_STATUS_2:
	case WM5100_CHANNEL_ENABLES_1:
	case WM5100_OUT_VOLUME_1L:
	case WM5100_OUT_VOLUME_1R:
	case WM5100_DAC_VOLUME_LIMIT_1L:
	case WM5100_DAC_VOLUME_LIMIT_1R:
	case WM5100_OUT_VOLUME_2L:
	case WM5100_OUT_VOLUME_2R:
	case WM5100_DAC_VOLUME_LIMIT_2L:
	case WM5100_DAC_VOLUME_LIMIT_2R:
	case WM5100_OUT_VOLUME_3L:
	case WM5100_OUT_VOLUME_3R:
	case WM5100_DAC_VOLUME_LIMIT_3L:
	case WM5100_DAC_VOLUME_LIMIT_3R:
	case WM5100_OUT_VOLUME_4L:
	case WM5100_OUT_VOLUME_4R:
	case WM5100_DAC_VOLUME_LIMIT_5L:
	case WM5100_DAC_VOLUME_LIMIT_5R:
	case WM5100_DAC_VOLUME_LIMIT_6L:
	case WM5100_DAC_VOLUME_LIMIT_6R:
	case WM5100_DAC_AEC_CONTROL_1:
	case WM5100_OUTPUT_VOLUME_RAMP:
	case WM5100_DAC_DIGITAL_VOLUME_1L:
	case WM5100_DAC_DIGITAL_VOLUME_1R:
	case WM5100_DAC_DIGITAL_VOLUME_2L:
	case WM5100_DAC_DIGITAL_VOLUME_2R:
	case WM5100_DAC_DIGITAL_VOLUME_3L:
	case WM5100_DAC_DIGITAL_VOLUME_3R:
	case WM5100_DAC_DIGITAL_VOLUME_4L:
	case WM5100_DAC_DIGITAL_VOLUME_4R:
	case WM5100_DAC_DIGITAL_VOLUME_5L:
	case WM5100_DAC_DIGITAL_VOLUME_5R:
	case WM5100_DAC_DIGITAL_VOLUME_6L:
	case WM5100_DAC_DIGITAL_VOLUME_6R:
	case WM5100_PDM_SPK1_CTRL_1:
	case WM5100_PDM_SPK1_CTRL_2:
	case WM5100_PDM_SPK2_CTRL_1:
	case WM5100_PDM_SPK2_CTRL_2:
	case WM5100_AUDIO_IF_1_1:
	case WM5100_AUDIO_IF_1_2:
	case WM5100_AUDIO_IF_1_3:
	case WM5100_AUDIO_IF_1_4:
	case WM5100_AUDIO_IF_1_5:
	case WM5100_AUDIO_IF_1_6:
	case WM5100_AUDIO_IF_1_7:
	case WM5100_AUDIO_IF_1_8:
	case WM5100_AUDIO_IF_1_9:
	case WM5100_AUDIO_IF_1_10:
	case WM5100_AUDIO_IF_1_11:
	case WM5100_AUDIO_IF_1_12:
	case WM5100_AUDIO_IF_1_13:
	case WM5100_AUDIO_IF_1_14:
	case WM5100_AUDIO_IF_1_15:
	case WM5100_AUDIO_IF_1_16:
	case WM5100_AUDIO_IF_1_17:
	case WM5100_AUDIO_IF_1_18:
	case WM5100_AUDIO_IF_1_19:
	case WM5100_AUDIO_IF_1_20:
	case WM5100_AUDIO_IF_1_21:
	case WM5100_AUDIO_IF_1_22:
	case WM5100_AUDIO_IF_1_23:
	case WM5100_AUDIO_IF_1_24:
	case WM5100_AUDIO_IF_1_25:
	case WM5100_AUDIO_IF_1_26:
	case WM5100_AUDIO_IF_1_27:
	case WM5100_AUDIO_IF_2_1:
	case WM5100_AUDIO_IF_2_2:
	case WM5100_AUDIO_IF_2_3:
	case WM5100_AUDIO_IF_2_4:
	case WM5100_AUDIO_IF_2_5:
	case WM5100_AUDIO_IF_2_6:
	case WM5100_AUDIO_IF_2_7:
	case WM5100_AUDIO_IF_2_8:
	case WM5100_AUDIO_IF_2_9:
	case WM5100_AUDIO_IF_2_10:
	case WM5100_AUDIO_IF_2_11:
	case WM5100_AUDIO_IF_2_18:
	case WM5100_AUDIO_IF_2_19:
	case WM5100_AUDIO_IF_2_26:
	case WM5100_AUDIO_IF_2_27:
	case WM5100_AUDIO_IF_3_1:
	case WM5100_AUDIO_IF_3_2:
	case WM5100_AUDIO_IF_3_3:
	case WM5100_AUDIO_IF_3_4:
	case WM5100_AUDIO_IF_3_5:
	case WM5100_AUDIO_IF_3_6:
	case WM5100_AUDIO_IF_3_7:
	case WM5100_AUDIO_IF_3_8:
	case WM5100_AUDIO_IF_3_9:
	case WM5100_AUDIO_IF_3_10:
	case WM5100_AUDIO_IF_3_11:
	case WM5100_AUDIO_IF_3_18:
	case WM5100_AUDIO_IF_3_19:
	case WM5100_AUDIO_IF_3_26:
	case WM5100_AUDIO_IF_3_27:
	case WM5100_PWM1MIX_INPUT_1_SOURCE:
	case WM5100_PWM1MIX_INPUT_1_VOLUME:
	case WM5100_PWM1MIX_INPUT_2_SOURCE:
	case WM5100_PWM1MIX_INPUT_2_VOLUME:
	case WM5100_PWM1MIX_INPUT_3_SOURCE:
	case WM5100_PWM1MIX_INPUT_3_VOLUME:
	case WM5100_PWM1MIX_INPUT_4_SOURCE:
	case WM5100_PWM1MIX_INPUT_4_VOLUME:
	case WM5100_PWM2MIX_INPUT_1_SOURCE:
	case WM5100_PWM2MIX_INPUT_1_VOLUME:
	case WM5100_PWM2MIX_INPUT_2_SOURCE:
	case WM5100_PWM2MIX_INPUT_2_VOLUME:
	case WM5100_PWM2MIX_INPUT_3_SOURCE:
	case WM5100_PWM2MIX_INPUT_3_VOLUME:
	case WM5100_PWM2MIX_INPUT_4_SOURCE:
	case WM5100_PWM2MIX_INPUT_4_VOLUME:
	case WM5100_OUT1LMIX_INPUT_1_SOURCE:
	case WM5100_OUT1LMIX_INPUT_1_VOLUME:
	case WM5100_OUT1LMIX_INPUT_2_SOURCE:
	case WM5100_OUT1LMIX_INPUT_2_VOLUME:
	case WM5100_OUT1LMIX_INPUT_3_SOURCE:
	case WM5100_OUT1LMIX_INPUT_3_VOLUME:
	case WM5100_OUT1LMIX_INPUT_4_SOURCE:
	case WM5100_OUT1LMIX_INPUT_4_VOLUME:
	case WM5100_OUT1RMIX_INPUT_1_SOURCE:
	case WM5100_OUT1RMIX_INPUT_1_VOLUME:
	case WM5100_OUT1RMIX_INPUT_2_SOURCE:
	case WM5100_OUT1RMIX_INPUT_2_VOLUME:
	case WM5100_OUT1RMIX_INPUT_3_SOURCE:
	case WM5100_OUT1RMIX_INPUT_3_VOLUME:
	case WM5100_OUT1RMIX_INPUT_4_SOURCE:
	case WM5100_OUT1RMIX_INPUT_4_VOLUME:
	case WM5100_OUT2LMIX_INPUT_1_SOURCE:
	case WM5100_OUT2LMIX_INPUT_1_VOLUME:
	case WM5100_OUT2LMIX_INPUT_2_SOURCE:
	case WM5100_OUT2LMIX_INPUT_2_VOLUME:
	case WM5100_OUT2LMIX_INPUT_3_SOURCE:
	case WM5100_OUT2LMIX_INPUT_3_VOLUME:
	case WM5100_OUT2LMIX_INPUT_4_SOURCE:
	case WM5100_OUT2LMIX_INPUT_4_VOLUME:
	case WM5100_OUT2RMIX_INPUT_1_SOURCE:
	case WM5100_OUT2RMIX_INPUT_1_VOLUME:
	case WM5100_OUT2RMIX_INPUT_2_SOURCE:
	case WM5100_OUT2RMIX_INPUT_2_VOLUME:
	case WM5100_OUT2RMIX_INPUT_3_SOURCE:
	case WM5100_OUT2RMIX_INPUT_3_VOLUME:
	case WM5100_OUT2RMIX_INPUT_4_SOURCE:
	case WM5100_OUT2RMIX_INPUT_4_VOLUME:
	case WM5100_OUT3LMIX_INPUT_1_SOURCE:
	case WM5100_OUT3LMIX_INPUT_1_VOLUME:
	case WM5100_OUT3LMIX_INPUT_2_SOURCE:
	case WM5100_OUT3LMIX_INPUT_2_VOLUME:
	case WM5100_OUT3LMIX_INPUT_3_SOURCE:
	case WM5100_OUT3LMIX_INPUT_3_VOLUME:
	case WM5100_OUT3LMIX_INPUT_4_SOURCE:
	case WM5100_OUT3LMIX_INPUT_4_VOLUME:
	case WM5100_OUT3RMIX_INPUT_1_SOURCE:
	case WM5100_OUT3RMIX_INPUT_1_VOLUME:
	case WM5100_OUT3RMIX_INPUT_2_SOURCE:
	case WM5100_OUT3RMIX_INPUT_2_VOLUME:
	case WM5100_OUT3RMIX_INPUT_3_SOURCE:
	case WM5100_OUT3RMIX_INPUT_3_VOLUME:
	case WM5100_OUT3RMIX_INPUT_4_SOURCE:
	case WM5100_OUT3RMIX_INPUT_4_VOLUME:
	case WM5100_OUT4LMIX_INPUT_1_SOURCE:
	case WM5100_OUT4LMIX_INPUT_1_VOLUME:
	case WM5100_OUT4LMIX_INPUT_2_SOURCE:
	case WM5100_OUT4LMIX_INPUT_2_VOLUME:
	case WM5100_OUT4LMIX_INPUT_3_SOURCE:
	case WM5100_OUT4LMIX_INPUT_3_VOLUME:
	case WM5100_OUT4LMIX_INPUT_4_SOURCE:
	case WM5100_OUT4LMIX_INPUT_4_VOLUME:
	case WM5100_OUT4RMIX_INPUT_1_SOURCE:
	case WM5100_OUT4RMIX_INPUT_1_VOLUME:
	case WM5100_OUT4RMIX_INPUT_2_SOURCE:
	case WM5100_OUT4RMIX_INPUT_2_VOLUME:
	case WM5100_OUT4RMIX_INPUT_3_SOURCE:
	case WM5100_OUT4RMIX_INPUT_3_VOLUME:
	case WM5100_OUT4RMIX_INPUT_4_SOURCE:
	case WM5100_OUT4RMIX_INPUT_4_VOLUME:
	case WM5100_OUT5LMIX_INPUT_1_SOURCE:
	case WM5100_OUT5LMIX_INPUT_1_VOLUME:
	case WM5100_OUT5LMIX_INPUT_2_SOURCE:
	case WM5100_OUT5LMIX_INPUT_2_VOLUME:
	case WM5100_OUT5LMIX_INPUT_3_SOURCE:
	case WM5100_OUT5LMIX_INPUT_3_VOLUME:
	case WM5100_OUT5LMIX_INPUT_4_SOURCE:
	case WM5100_OUT5LMIX_INPUT_4_VOLUME:
	case WM5100_OUT5RMIX_INPUT_1_SOURCE:
	case WM5100_OUT5RMIX_INPUT_1_VOLUME:
	case WM5100_OUT5RMIX_INPUT_2_SOURCE:
	case WM5100_OUT5RMIX_INPUT_2_VOLUME:
	case WM5100_OUT5RMIX_INPUT_3_SOURCE:
	case WM5100_OUT5RMIX_INPUT_3_VOLUME:
	case WM5100_OUT5RMIX_INPUT_4_SOURCE:
	case WM5100_OUT5RMIX_INPUT_4_VOLUME:
	case WM5100_OUT6LMIX_INPUT_1_SOURCE:
	case WM5100_OUT6LMIX_INPUT_1_VOLUME:
	case WM5100_OUT6LMIX_INPUT_2_SOURCE:
	case WM5100_OUT6LMIX_INPUT_2_VOLUME:
	case WM5100_OUT6LMIX_INPUT_3_SOURCE:
	case WM5100_OUT6LMIX_INPUT_3_VOLUME:
	case WM5100_OUT6LMIX_INPUT_4_SOURCE:
	case WM5100_OUT6LMIX_INPUT_4_VOLUME:
	case WM5100_OUT6RMIX_INPUT_1_SOURCE:
	case WM5100_OUT6RMIX_INPUT_1_VOLUME:
	case WM5100_OUT6RMIX_INPUT_2_SOURCE:
	case WM5100_OUT6RMIX_INPUT_2_VOLUME:
	case WM5100_OUT6RMIX_INPUT_3_SOURCE:
	case WM5100_OUT6RMIX_INPUT_3_VOLUME:
	case WM5100_OUT6RMIX_INPUT_4_SOURCE:
	case WM5100_OUT6RMIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX1MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX1MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX1MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX1MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX1MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX1MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX1MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX1MIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX2MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX2MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX2MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX2MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX2MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX2MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX2MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX2MIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX3MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX3MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX3MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX3MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX3MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX3MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX3MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX3MIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX4MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX4MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX4MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX4MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX4MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX4MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX4MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX4MIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX5MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX5MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX5MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX5MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX5MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX5MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX5MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX5MIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX6MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX6MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX6MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX6MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX6MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX6MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX6MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX6MIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX7MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX7MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX7MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX7MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX7MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX7MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX7MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX7MIX_INPUT_4_VOLUME:
	case WM5100_AIF1TX8MIX_INPUT_1_SOURCE:
	case WM5100_AIF1TX8MIX_INPUT_1_VOLUME:
	case WM5100_AIF1TX8MIX_INPUT_2_SOURCE:
	case WM5100_AIF1TX8MIX_INPUT_2_VOLUME:
	case WM5100_AIF1TX8MIX_INPUT_3_SOURCE:
	case WM5100_AIF1TX8MIX_INPUT_3_VOLUME:
	case WM5100_AIF1TX8MIX_INPUT_4_SOURCE:
	case WM5100_AIF1TX8MIX_INPUT_4_VOLUME:
	case WM5100_AIF2TX1MIX_INPUT_1_SOURCE:
	case WM5100_AIF2TX1MIX_INPUT_1_VOLUME:
	case WM5100_AIF2TX1MIX_INPUT_2_SOURCE:
	case WM5100_AIF2TX1MIX_INPUT_2_VOLUME:
	case WM5100_AIF2TX1MIX_INPUT_3_SOURCE:
	case WM5100_AIF2TX1MIX_INPUT_3_VOLUME:
	case WM5100_AIF2TX1MIX_INPUT_4_SOURCE:
	case WM5100_AIF2TX1MIX_INPUT_4_VOLUME:
	case WM5100_AIF2TX2MIX_INPUT_1_SOURCE:
	case WM5100_AIF2TX2MIX_INPUT_1_VOLUME:
	case WM5100_AIF2TX2MIX_INPUT_2_SOURCE:
	case WM5100_AIF2TX2MIX_INPUT_2_VOLUME:
	case WM5100_AIF2TX2MIX_INPUT_3_SOURCE:
	case WM5100_AIF2TX2MIX_INPUT_3_VOLUME:
	case WM5100_AIF2TX2MIX_INPUT_4_SOURCE:
	case WM5100_AIF2TX2MIX_INPUT_4_VOLUME:
	case WM5100_AIF3TX1MIX_INPUT_1_SOURCE:
	case WM5100_AIF3TX1MIX_INPUT_1_VOLUME:
	case WM5100_AIF3TX1MIX_INPUT_2_SOURCE:
	case WM5100_AIF3TX1MIX_INPUT_2_VOLUME:
	case WM5100_AIF3TX1MIX_INPUT_3_SOURCE:
	case WM5100_AIF3TX1MIX_INPUT_3_VOLUME:
	case WM5100_AIF3TX1MIX_INPUT_4_SOURCE:
	case WM5100_AIF3TX1MIX_INPUT_4_VOLUME:
	case WM5100_AIF3TX2MIX_INPUT_1_SOURCE:
	case WM5100_AIF3TX2MIX_INPUT_1_VOLUME:
	case WM5100_AIF3TX2MIX_INPUT_2_SOURCE:
	case WM5100_AIF3TX2MIX_INPUT_2_VOLUME:
	case WM5100_AIF3TX2MIX_INPUT_3_SOURCE:
	case WM5100_AIF3TX2MIX_INPUT_3_VOLUME:
	case WM5100_AIF3TX2MIX_INPUT_4_SOURCE:
	case WM5100_AIF3TX2MIX_INPUT_4_VOLUME:
	case WM5100_EQ1MIX_INPUT_1_SOURCE:
	case WM5100_EQ1MIX_INPUT_1_VOLUME:
	case WM5100_EQ1MIX_INPUT_2_SOURCE:
	case WM5100_EQ1MIX_INPUT_2_VOLUME:
	case WM5100_EQ1MIX_INPUT_3_SOURCE:
	case WM5100_EQ1MIX_INPUT_3_VOLUME:
	case WM5100_EQ1MIX_INPUT_4_SOURCE:
	case WM5100_EQ1MIX_INPUT_4_VOLUME:
	case WM5100_EQ2MIX_INPUT_1_SOURCE:
	case WM5100_EQ2MIX_INPUT_1_VOLUME:
	case WM5100_EQ2MIX_INPUT_2_SOURCE:
	case WM5100_EQ2MIX_INPUT_2_VOLUME:
	case WM5100_EQ2MIX_INPUT_3_SOURCE:
	case WM5100_EQ2MIX_INPUT_3_VOLUME:
	case WM5100_EQ2MIX_INPUT_4_SOURCE:
	case WM5100_EQ2MIX_INPUT_4_VOLUME:
	case WM5100_EQ3MIX_INPUT_1_SOURCE:
	case WM5100_EQ3MIX_INPUT_1_VOLUME:
	case WM5100_EQ3MIX_INPUT_2_SOURCE:
	case WM5100_EQ3MIX_INPUT_2_VOLUME:
	case WM5100_EQ3MIX_INPUT_3_SOURCE:
	case WM5100_EQ3MIX_INPUT_3_VOLUME:
	case WM5100_EQ3MIX_INPUT_4_SOURCE:
	case WM5100_EQ3MIX_INPUT_4_VOLUME:
	case WM5100_EQ4MIX_INPUT_1_SOURCE:
	case WM5100_EQ4MIX_INPUT_1_VOLUME:
	case WM5100_EQ4MIX_INPUT_2_SOURCE:
	case WM5100_EQ4MIX_INPUT_2_VOLUME:
	case WM5100_EQ4MIX_INPUT_3_SOURCE:
	case WM5100_EQ4MIX_INPUT_3_VOLUME:
	case WM5100_EQ4MIX_INPUT_4_SOURCE:
	case WM5100_EQ4MIX_INPUT_4_VOLUME:
	case WM5100_DRC1LMIX_INPUT_1_SOURCE:
	case WM5100_DRC1LMIX_INPUT_1_VOLUME:
	case WM5100_DRC1LMIX_INPUT_2_SOURCE:
	case WM5100_DRC1LMIX_INPUT_2_VOLUME:
	case WM5100_DRC1LMIX_INPUT_3_SOURCE:
	case WM5100_DRC1LMIX_INPUT_3_VOLUME:
	case WM5100_DRC1LMIX_INPUT_4_SOURCE:
	case WM5100_DRC1LMIX_INPUT_4_VOLUME:
	case WM5100_DRC1RMIX_INPUT_1_SOURCE:
	case WM5100_DRC1RMIX_INPUT_1_VOLUME:
	case WM5100_DRC1RMIX_INPUT_2_SOURCE:
	case WM5100_DRC1RMIX_INPUT_2_VOLUME:
	case WM5100_DRC1RMIX_INPUT_3_SOURCE:
	case WM5100_DRC1RMIX_INPUT_3_VOLUME:
	case WM5100_DRC1RMIX_INPUT_4_SOURCE:
	case WM5100_DRC1RMIX_INPUT_4_VOLUME:
	case WM5100_HPLP1MIX_INPUT_1_SOURCE:
	case WM5100_HPLP1MIX_INPUT_1_VOLUME:
	case WM5100_HPLP1MIX_INPUT_2_SOURCE:
	case WM5100_HPLP1MIX_INPUT_2_VOLUME:
	case WM5100_HPLP1MIX_INPUT_3_SOURCE:
	case WM5100_HPLP1MIX_INPUT_3_VOLUME:
	case WM5100_HPLP1MIX_INPUT_4_SOURCE:
	case WM5100_HPLP1MIX_INPUT_4_VOLUME:
	case WM5100_HPLP2MIX_INPUT_1_SOURCE:
	case WM5100_HPLP2MIX_INPUT_1_VOLUME:
	case WM5100_HPLP2MIX_INPUT_2_SOURCE:
	case WM5100_HPLP2MIX_INPUT_2_VOLUME:
	case WM5100_HPLP2MIX_INPUT_3_SOURCE:
	case WM5100_HPLP2MIX_INPUT_3_VOLUME:
	case WM5100_HPLP2MIX_INPUT_4_SOURCE:
	case WM5100_HPLP2MIX_INPUT_4_VOLUME:
	case WM5100_HPLP3MIX_INPUT_1_SOURCE:
	case WM5100_HPLP3MIX_INPUT_1_VOLUME:
	case WM5100_HPLP3MIX_INPUT_2_SOURCE:
	case WM5100_HPLP3MIX_INPUT_2_VOLUME:
	case WM5100_HPLP3MIX_INPUT_3_SOURCE:
	case WM5100_HPLP3MIX_INPUT_3_VOLUME:
	case WM5100_HPLP3MIX_INPUT_4_SOURCE:
	case WM5100_HPLP3MIX_INPUT_4_VOLUME:
	case WM5100_HPLP4MIX_INPUT_1_SOURCE:
	case WM5100_HPLP4MIX_INPUT_1_VOLUME:
	case WM5100_HPLP4MIX_INPUT_2_SOURCE:
	case WM5100_HPLP4MIX_INPUT_2_VOLUME:
	case WM5100_HPLP4MIX_INPUT_3_SOURCE:
	case WM5100_HPLP4MIX_INPUT_3_VOLUME:
	case WM5100_HPLP4MIX_INPUT_4_SOURCE:
	case WM5100_HPLP4MIX_INPUT_4_VOLUME:
	case WM5100_DSP1LMIX_INPUT_1_SOURCE:
	case WM5100_DSP1LMIX_INPUT_1_VOLUME:
	case WM5100_DSP1LMIX_INPUT_2_SOURCE:
	case WM5100_DSP1LMIX_INPUT_2_VOLUME:
	case WM5100_DSP1LMIX_INPUT_3_SOURCE:
	case WM5100_DSP1LMIX_INPUT_3_VOLUME:
	case WM5100_DSP1LMIX_INPUT_4_SOURCE:
	case WM5100_DSP1LMIX_INPUT_4_VOLUME:
	case WM5100_DSP1RMIX_INPUT_1_SOURCE:
	case WM5100_DSP1RMIX_INPUT_1_VOLUME:
	case WM5100_DSP1RMIX_INPUT_2_SOURCE:
	case WM5100_DSP1RMIX_INPUT_2_VOLUME:
	case WM5100_DSP1RMIX_INPUT_3_SOURCE:
	case WM5100_DSP1RMIX_INPUT_3_VOLUME:
	case WM5100_DSP1RMIX_INPUT_4_SOURCE:
	case WM5100_DSP1RMIX_INPUT_4_VOLUME:
	case WM5100_DSP1AUX1MIX_INPUT_1_SOURCE:
	case WM5100_DSP1AUX2MIX_INPUT_1_SOURCE:
	case WM5100_DSP1AUX3MIX_INPUT_1_SOURCE:
	case WM5100_DSP1AUX4MIX_INPUT_1_SOURCE:
	case WM5100_DSP1AUX5MIX_INPUT_1_SOURCE:
	case WM5100_DSP1AUX6MIX_INPUT_1_SOURCE:
	case WM5100_DSP2LMIX_INPUT_1_SOURCE:
	case WM5100_DSP2LMIX_INPUT_1_VOLUME:
	case WM5100_DSP2LMIX_INPUT_2_SOURCE:
	case WM5100_DSP2LMIX_INPUT_2_VOLUME:
	case WM5100_DSP2LMIX_INPUT_3_SOURCE:
	case WM5100_DSP2LMIX_INPUT_3_VOLUME:
	case WM5100_DSP2LMIX_INPUT_4_SOURCE:
	case WM5100_DSP2LMIX_INPUT_4_VOLUME:
	case WM5100_DSP2RMIX_INPUT_1_SOURCE:
	case WM5100_DSP2RMIX_INPUT_1_VOLUME:
	case WM5100_DSP2RMIX_INPUT_2_SOURCE:
	case WM5100_DSP2RMIX_INPUT_2_VOLUME:
	case WM5100_DSP2RMIX_INPUT_3_SOURCE:
	case WM5100_DSP2RMIX_INPUT_3_VOLUME:
	case WM5100_DSP2RMIX_INPUT_4_SOURCE:
	case WM5100_DSP2RMIX_INPUT_4_VOLUME:
	case WM5100_DSP2AUX1MIX_INPUT_1_SOURCE:
	case WM5100_DSP2AUX2MIX_INPUT_1_SOURCE:
	case WM5100_DSP2AUX3MIX_INPUT_1_SOURCE:
	case WM5100_DSP2AUX4MIX_INPUT_1_SOURCE:
	case WM5100_DSP2AUX5MIX_INPUT_1_SOURCE:
	case WM5100_DSP2AUX6MIX_INPUT_1_SOURCE:
	case WM5100_DSP3LMIX_INPUT_1_SOURCE:
	case WM5100_DSP3LMIX_INPUT_1_VOLUME:
	case WM5100_DSP3LMIX_INPUT_2_SOURCE:
	case WM5100_DSP3LMIX_INPUT_2_VOLUME:
	case WM5100_DSP3LMIX_INPUT_3_SOURCE:
	case WM5100_DSP3LMIX_INPUT_3_VOLUME:
	case WM5100_DSP3LMIX_INPUT_4_SOURCE:
	case WM5100_DSP3LMIX_INPUT_4_VOLUME:
	case WM5100_DSP3RMIX_INPUT_1_SOURCE:
	case WM5100_DSP3RMIX_INPUT_1_VOLUME:
	case WM5100_DSP3RMIX_INPUT_2_SOURCE:
	case WM5100_DSP3RMIX_INPUT_2_VOLUME:
	case WM5100_DSP3RMIX_INPUT_3_SOURCE:
	case WM5100_DSP3RMIX_INPUT_3_VOLUME:
	case WM5100_DSP3RMIX_INPUT_4_SOURCE:
	case WM5100_DSP3RMIX_INPUT_4_VOLUME:
	case WM5100_DSP3AUX1MIX_INPUT_1_SOURCE:
	case WM5100_DSP3AUX2MIX_INPUT_1_SOURCE:
	case WM5100_DSP3AUX3MIX_INPUT_1_SOURCE:
	case WM5100_DSP3AUX4MIX_INPUT_1_SOURCE:
	case WM5100_DSP3AUX5MIX_INPUT_1_SOURCE:
	case WM5100_DSP3AUX6MIX_INPUT_1_SOURCE:
	case WM5100_ASRC1LMIX_INPUT_1_SOURCE:
	case WM5100_ASRC1RMIX_INPUT_1_SOURCE:
	case WM5100_ASRC2LMIX_INPUT_1_SOURCE:
	case WM5100_ASRC2RMIX_INPUT_1_SOURCE:
	case WM5100_ISRC1DEC1MIX_INPUT_1_SOURCE:
	case WM5100_ISRC1DEC2MIX_INPUT_1_SOURCE:
	case WM5100_ISRC1DEC3MIX_INPUT_1_SOURCE:
	case WM5100_ISRC1DEC4MIX_INPUT_1_SOURCE:
	case WM5100_ISRC1INT1MIX_INPUT_1_SOURCE:
	case WM5100_ISRC1INT2MIX_INPUT_1_SOURCE:
	case WM5100_ISRC1INT3MIX_INPUT_1_SOURCE:
	case WM5100_ISRC1INT4MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2DEC1MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2DEC2MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2DEC3MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2DEC4MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2INT1MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2INT2MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2INT3MIX_INPUT_1_SOURCE:
	case WM5100_ISRC2INT4MIX_INPUT_1_SOURCE:
	case WM5100_GPIO_CTRL_1:
	case WM5100_GPIO_CTRL_2:
	case WM5100_GPIO_CTRL_3:
	case WM5100_GPIO_CTRL_4:
	case WM5100_GPIO_CTRL_5:
	case WM5100_GPIO_CTRL_6:
	case WM5100_MISC_PAD_CTRL_1:
	case WM5100_MISC_PAD_CTRL_2:
	case WM5100_MISC_PAD_CTRL_3:
	case WM5100_MISC_PAD_CTRL_4:
	case WM5100_MISC_PAD_CTRL_5:
	case WM5100_MISC_GPIO_1:
	case WM5100_INTERRUPT_STATUS_1:
	case WM5100_INTERRUPT_STATUS_2:
	case WM5100_INTERRUPT_STATUS_3:
	case WM5100_INTERRUPT_STATUS_4:
	case WM5100_INTERRUPT_RAW_STATUS_2:
	case WM5100_INTERRUPT_RAW_STATUS_3:
	case WM5100_INTERRUPT_RAW_STATUS_4:
	case WM5100_INTERRUPT_STATUS_1_MASK:
	case WM5100_INTERRUPT_STATUS_2_MASK:
	case WM5100_INTERRUPT_STATUS_3_MASK:
	case WM5100_INTERRUPT_STATUS_4_MASK:
	case WM5100_INTERRUPT_CONTROL:
	case WM5100_IRQ_DEBOUNCE_1:
	case WM5100_IRQ_DEBOUNCE_2:
	case WM5100_FX_CTRL:
	case WM5100_EQ1_1:
	case WM5100_EQ1_2:
	case WM5100_EQ1_3:
	case WM5100_EQ1_4:
	case WM5100_EQ1_5:
	case WM5100_EQ1_6:
	case WM5100_EQ1_7:
	case WM5100_EQ1_8:
	case WM5100_EQ1_9:
	case WM5100_EQ1_10:
	case WM5100_EQ1_11:
	case WM5100_EQ1_12:
	case WM5100_EQ1_13:
	case WM5100_EQ1_14:
	case WM5100_EQ1_15:
	case WM5100_EQ1_16:
	case WM5100_EQ1_17:
	case WM5100_EQ1_18:
	case WM5100_EQ1_19:
	case WM5100_EQ1_20:
	case WM5100_EQ2_1:
	case WM5100_EQ2_2:
	case WM5100_EQ2_3:
	case WM5100_EQ2_4:
	case WM5100_EQ2_5:
	case WM5100_EQ2_6:
	case WM5100_EQ2_7:
	case WM5100_EQ2_8:
	case WM5100_EQ2_9:
	case WM5100_EQ2_10:
	case WM5100_EQ2_11:
	case WM5100_EQ2_12:
	case WM5100_EQ2_13:
	case WM5100_EQ2_14:
	case WM5100_EQ2_15:
	case WM5100_EQ2_16:
	case WM5100_EQ2_17:
	case WM5100_EQ2_18:
	case WM5100_EQ2_19:
	case WM5100_EQ2_20:
	case WM5100_EQ3_1:
	case WM5100_EQ3_2:
	case WM5100_EQ3_3:
	case WM5100_EQ3_4:
	case WM5100_EQ3_5:
	case WM5100_EQ3_6:
	case WM5100_EQ3_7:
	case WM5100_EQ3_8:
	case WM5100_EQ3_9:
	case WM5100_EQ3_10:
	case WM5100_EQ3_11:
	case WM5100_EQ3_12:
	case WM5100_EQ3_13:
	case WM5100_EQ3_14:
	case WM5100_EQ3_15:
	case WM5100_EQ3_16:
	case WM5100_EQ3_17:
	case WM5100_EQ3_18:
	case WM5100_EQ3_19:
	case WM5100_EQ3_20:
	case WM5100_EQ4_1:
	case WM5100_EQ4_2:
	case WM5100_EQ4_3:
	case WM5100_EQ4_4:
	case WM5100_EQ4_5:
	case WM5100_EQ4_6:
	case WM5100_EQ4_7:
	case WM5100_EQ4_8:
	case WM5100_EQ4_9:
	case WM5100_EQ4_10:
	case WM5100_EQ4_11:
	case WM5100_EQ4_12:
	case WM5100_EQ4_13:
	case WM5100_EQ4_14:
	case WM5100_EQ4_15:
	case WM5100_EQ4_16:
	case WM5100_EQ4_17:
	case WM5100_EQ4_18:
	case WM5100_EQ4_19:
	case WM5100_EQ4_20:
	case WM5100_DRC1_CTRL1:
	case WM5100_DRC1_CTRL2:
	case WM5100_DRC1_CTRL3:
	case WM5100_DRC1_CTRL4:
	case WM5100_DRC1_CTRL5:
	case WM5100_HPLPF1_1:
	case WM5100_HPLPF1_2:
	case WM5100_HPLPF2_1:
	case WM5100_HPLPF2_2:
	case WM5100_HPLPF3_1:
	case WM5100_HPLPF3_2:
	case WM5100_HPLPF4_1:
	case WM5100_HPLPF4_2:
	case WM5100_DSP1_CONTROL_1:
	case WM5100_DSP1_CONTROL_2:
	case WM5100_DSP1_CONTROL_3:
	case WM5100_DSP1_CONTROL_4:
	case WM5100_DSP1_CONTROL_5:
	case WM5100_DSP1_CONTROL_6:
	case WM5100_DSP1_CONTROL_7:
	case WM5100_DSP1_CONTROL_8:
	case WM5100_DSP1_CONTROL_9:
	case WM5100_DSP1_CONTROL_10:
	case WM5100_DSP1_CONTROL_11:
	case WM5100_DSP1_CONTROL_12:
	case WM5100_DSP1_CONTROL_13:
	case WM5100_DSP1_CONTROL_14:
	case WM5100_DSP1_CONTROL_15:
	case WM5100_DSP1_CONTROL_16:
	case WM5100_DSP1_CONTROL_17:
	case WM5100_DSP1_CONTROL_18:
	case WM5100_DSP1_CONTROL_19:
	case WM5100_DSP1_CONTROL_20:
	case WM5100_DSP1_CONTROL_21:
	case WM5100_DSP1_CONTROL_22:
	case WM5100_DSP1_CONTROL_23:
	case WM5100_DSP1_CONTROL_24:
	case WM5100_DSP1_CONTROL_25:
	case WM5100_DSP1_CONTROL_26:
	case WM5100_DSP1_CONTROL_27:
	case WM5100_DSP1_CONTROL_28:
	case WM5100_DSP1_CONTROL_29:
	case WM5100_DSP1_CONTROL_30:
	case WM5100_DSP2_CONTROL_1:
	case WM5100_DSP2_CONTROL_2:
	case WM5100_DSP2_CONTROL_3:
	case WM5100_DSP2_CONTROL_4:
	case WM5100_DSP2_CONTROL_5:
	case WM5100_DSP2_CONTROL_6:
	case WM5100_DSP2_CONTROL_7:
	case WM5100_DSP2_CONTROL_8:
	case WM5100_DSP2_CONTROL_9:
	case WM5100_DSP2_CONTROL_10:
	case WM5100_DSP2_CONTROL_11:
	case WM5100_DSP2_CONTROL_12:
	case WM5100_DSP2_CONTROL_13:
	case WM5100_DSP2_CONTROL_14:
	case WM5100_DSP2_CONTROL_15:
	case WM5100_DSP2_CONTROL_16:
	case WM5100_DSP2_CONTROL_17:
	case WM5100_DSP2_CONTROL_18:
	case WM5100_DSP2_CONTROL_19:
	case WM5100_DSP2_CONTROL_20:
	case WM5100_DSP2_CONTROL_21:
	case WM5100_DSP2_CONTROL_22:
	case WM5100_DSP2_CONTROL_23:
	case WM5100_DSP2_CONTROL_24:
	case WM5100_DSP2_CONTROL_25:
	case WM5100_DSP2_CONTROL_26:
	case WM5100_DSP2_CONTROL_27:
	case WM5100_DSP2_CONTROL_28:
	case WM5100_DSP2_CONTROL_29:
	case WM5100_DSP2_CONTROL_30:
	case WM5100_DSP3_CONTROL_1:
	case WM5100_DSP3_CONTROL_2:
	case WM5100_DSP3_CONTROL_3:
	case WM5100_DSP3_CONTROL_4:
	case WM5100_DSP3_CONTROL_5:
	case WM5100_DSP3_CONTROL_6:
	case WM5100_DSP3_CONTROL_7:
	case WM5100_DSP3_CONTROL_8:
	case WM5100_DSP3_CONTROL_9:
	case WM5100_DSP3_CONTROL_10:
	case WM5100_DSP3_CONTROL_11:
	case WM5100_DSP3_CONTROL_12:
	case WM5100_DSP3_CONTROL_13:
	case WM5100_DSP3_CONTROL_14:
	case WM5100_DSP3_CONTROL_15:
	case WM5100_DSP3_CONTROL_16:
	case WM5100_DSP3_CONTROL_17:
	case WM5100_DSP3_CONTROL_18:
	case WM5100_DSP3_CONTROL_19:
	case WM5100_DSP3_CONTROL_20:
	case WM5100_DSP3_CONTROL_21:
	case WM5100_DSP3_CONTROL_22:
	case WM5100_DSP3_CONTROL_23:
	case WM5100_DSP3_CONTROL_24:
	case WM5100_DSP3_CONTROL_25:
	case WM5100_DSP3_CONTROL_26:
	case WM5100_DSP3_CONTROL_27:
	case WM5100_DSP3_CONTROL_28:
	case WM5100_DSP3_CONTROL_29:
	case WM5100_DSP3_CONTROL_30:
		return true;
	default:
		/* DSP1/2/3 program, coefficient and data memories */
		return (reg >= WM5100_DSP1_PM_0 && reg <= WM5100_DSP1_PM_1535) ||
		       (reg >= WM5100_DSP1_ZM_0 && reg <= WM5100_DSP1_ZM_2047) ||
		       (reg >= WM5100_DSP1_DM_0 && reg <= WM5100_DSP1_DM_511) ||
		       (reg >= WM5100_DSP2_PM_0 && reg <= WM5100_DSP2_PM_1535) ||
		       (reg >= WM5100_DSP2_ZM_0 && reg <= WM5100_DSP2_ZM_2047) ||
		       (reg >= WM5100_DSP2_DM_0 && reg <= WM5100_DSP2_DM_511) ||
		       (reg >= WM5100_DSP3_PM_0 && reg <= WM5100_DSP3_PM_1535) ||
		       (reg >= WM5100_DSP3_ZM_0 && reg <= WM5100_DSP3_ZM_2047) ||
		       (reg >= WM5100_DSP3_DM_0 && reg <= WM5100_DSP3_DM_511);
	}
}
struct reg_default wm5100_reg_defaults[WM5100_REGISTER_COUNT] = {
{ 0x0000, 0x0000 }, /* R0 - software reset */
{ 0x0001, 0x0000 }, /* R1 - Device Revision */
{ 0x0010, 0x0801 }, /* R16 - Ctrl IF 1 */
{ 0x0020, 0x0000 }, /* R32 - Tone Generator 1 */
{ 0x0030, 0x0000 }, /* R48 - PWM Drive 1 */
{ 0x0031, 0x0100 }, /* R49 - PWM Drive 2 */
{ 0x0032, 0x0100 }, /* R50 - PWM Drive 3 */
{ 0x0100, 0x0002 }, /* R256 - Clocking 1 */
{ 0x0101, 0x0000 }, /* R257 - Clocking 3 */
{ 0x0102, 0x0011 }, /* R258 - Clocking 4 */
{ 0x0103, 0x0011 }, /* R259 - Clocking 5 */
{ 0x0104, 0x0011 }, /* R260 - Clocking 6 */
{ 0x0107, 0x0000 }, /* R263 - Clocking 7 */
{ 0x0108, 0x0000 }, /* R264 - Clocking 8 */
{ 0x0120, 0x0000 }, /* R288 - ASRC_ENABLE */
{ 0x0121, 0x0000 }, /* R289 - ASRC_STATUS */
{ 0x0122, 0x0000 }, /* R290 - ASRC_RATE1 */
{ 0x0141, 0x8000 }, /* R321 - ISRC 1 CTRL 1 */
{ 0x0142, 0x0000 }, /* R322 - ISRC 1 CTRL 2 */
{ 0x0143, 0x8000 }, /* R323 - ISRC 2 CTRL 1 */
{ 0x0144, 0x0000 }, /* R324 - ISRC 2 CTRL 2 */
{ 0x0182, 0x0000 }, /* R386 - FLL1 Control 1 */
{ 0x0183, 0x0000 }, /* R387 - FLL1 Control 2 */
{ 0x0184, 0x0000 }, /* R388 - FLL1 Control 3 */
{ 0x0186, 0x0177 }, /* R390 - FLL1 Control 5 */
{ 0x0187, 0x0001 }, /* R391 - FLL1 Control 6 */
{ 0x0188, 0x0000 }, /* R392 - FLL1 EFS 1 */
{ 0x01A2, 0x0000 }, /* R418 - FLL2 Control 1 */
{ 0x01A3, 0x0000 }, /* R419 - FLL2 Control 2 */
{ 0x01A4, 0x0000 }, /* R420 - FLL2 Control 3 */
{ 0x01A6, 0x0177 }, /* R422 - FLL2 Control 5 */
{ 0x01A7, 0x0001 }, /* R423 - FLL2 Control 6 */
{ 0x01A8, 0x0000 }, /* R424 - FLL2 EFS 1 */
{ 0x0200, 0x0020 }, /* R512 - Mic Charge Pump 1 */
{ 0x0201, 0xB084 }, /* R513 - Mic Charge Pump 2 */
{ 0x0202, 0xBBDE }, /* R514 - HP Charge Pump 1 */
{ 0x0211, 0x20D4 }, /* R529 - LDO1 Control */
{ 0x0215, 0x0062 }, /* R533 - Mic Bias Ctrl 1 */
{ 0x0216, 0x0062 }, /* R534 - Mic Bias Ctrl 2 */
{ 0x0217, 0x0062 }, /* R535 - Mic Bias Ctrl 3 */
{ 0x0280, 0x0004 }, /* R640 - Accessory Detect Mode 1 */
{ 0x0288, 0x0020 }, /* R648 - Headphone Detect 1 */
{ 0x0289, 0x0000 }, /* R649 - Headphone Detect 2 */
{ 0x0290, 0x1100 }, /* R656 - Mic Detect 1 */
{ 0x0291, 0x009F }, /* R657 - Mic Detect 2 */
{ 0x0292, 0x0000 }, /* R658 - Mic Detect 3 */
{ 0x0301, 0x0000 }, /* R769 - Input Enables */
{ 0x0302, 0x0000 }, /* R770 - Input Enables Status */
{ 0x0310, 0x2280 }, /* R784 - IN1L Control */
{ 0x0311, 0x0080 }, /* R785 - IN1R Control */
{ 0x0312, 0x2280 }, /* R786 - IN2L Control */
{ 0x0313, 0x0080 }, /* R787 - IN2R Control */
{ 0x0314, 0x2280 }, /* R788 - IN3L Control */
{ 0x0315, 0x0080 }, /* R789 - IN3R Control */
{ 0x0316, 0x2280 }, /* R790 - IN4L Control */
{ 0x0317, 0x0080 }, /* R791 - IN4R Control */
{ 0x0318, 0x0000 }, /* R792 - RXANC_SRC */
{ 0x0319, 0x0022 }, /* R793 - Input Volume Ramp */
{ 0x0320, 0x0180 }, /* R800 - ADC Digital Volume 1L */
{ 0x0321, 0x0180 }, /* R801 - ADC Digital Volume 1R */
{ 0x0322, 0x0180 }, /* R802 - ADC Digital Volume 2L */
{ 0x0323, 0x0180 }, /* R803 - ADC Digital Volume 2R */
{ 0x0324, 0x0180 }, /* R804 - ADC Digital Volume 3L */
{ 0x0325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
{ 0x0326, 0x0180 }, /* R806 - ADC Digital Volume 4L */
{ 0x0327, 0x0180 }, /* R807 - ADC Digital Volume 4R */
{ 0x0401, 0x0000 }, /* R1025 - Output Enables 2 */
{ 0x0402, 0x0000 }, /* R1026 - Output Status 1 */
{ 0x0403, 0x0000 }, /* R1027 - Output Status 2 */
{ 0x0408, 0x0000 }, /* R1032 - Channel Enables 1 */
{ 0x0410, 0x0080 }, /* R1040 - Out Volume 1L */
{ 0x0411, 0x0080 }, /* R1041 - Out Volume 1R */
{ 0x0412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */
{ 0x0413, 0x0080 }, /* R1043 - DAC Volume Limit 1R */
{ 0x0414, 0x0080 }, /* R1044 - Out Volume 2L */
{ 0x0415, 0x0080 }, /* R1045 - Out Volume 2R */
{ 0x0416, 0x0080 }, /* R1046 - DAC Volume Limit 2L */
{ 0x0417, 0x0080 }, /* R1047 - DAC Volume Limit 2R */
{ 0x0418, 0x0080 }, /* R1048 - Out Volume 3L */
{ 0x0419, 0x0080 }, /* R1049 - Out Volume 3R */
{ 0x041A, 0x0080 }, /* R1050 - DAC Volume Limit 3L */
{ 0x041B, 0x0080 }, /* R1051 - DAC Volume Limit 3R */
{ 0x041C, 0x0080 }, /* R1052 - Out Volume 4L */
{ 0x041D, 0x0080 }, /* R1053 - Out Volume 4R */
{ 0x041E, 0x0080 }, /* R1054 - DAC Volume Limit 5L */
{ 0x041F, 0x0080 }, /* R1055 - DAC Volume Limit 5R */
{ 0x0420, 0x0080 }, /* R1056 - DAC Volume Limit 6L */
{ 0x0421, 0x0080 }, /* R1057 - DAC Volume Limit 6R */
{ 0x0440, 0x0000 }, /* R1088 - DAC AEC Control 1 */
{ 0x0441, 0x0022 }, /* R1089 - Output Volume Ramp */
{ 0x0480, 0x0180 }, /* R1152 - DAC Digital Volume 1L */
{ 0x0481, 0x0180 }, /* R1153 - DAC Digital Volume 1R */
{ 0x0482, 0x0180 }, /* R1154 - DAC Digital Volume 2L */
{ 0x0483, 0x0180 }, /* R1155 - DAC Digital Volume 2R */
{ 0x0484, 0x0180 }, /* R1156 - DAC Digital Volume 3L */
{ 0x0485, 0x0180 }, /* R1157 - DAC Digital Volume 3R */
{ 0x0486, 0x0180 }, /* R1158 - DAC Digital Volume 4L */
{ 0x0487, 0x0180 }, /* R1159 - DAC Digital Volume 4R */
{ 0x0488, 0x0180 }, /* R1160 - DAC Digital Volume 5L */
{ 0x0489, 0x0180 }, /* R1161 - DAC Digital Volume 5R */
{ 0x048A, 0x0180 }, /* R1162 - DAC Digital Volume 6L */
{ 0x048B, 0x0180 }, /* R1163 - DAC Digital Volume 6R */
{ 0x04C0, 0x0069 }, /* R1216 - PDM SPK1 CTRL 1 */
{ 0x04C1, 0x0000 }, /* R1217 - PDM SPK1 CTRL 2 */
{ 0x04C2, 0x0069 }, /* R1218 - PDM SPK2 CTRL 1 */
{ 0x04C3, 0x0000 }, /* R1219 - PDM SPK2 CTRL 2 */
{ 0x0500, 0x000C }, /* R1280 - Audio IF 1_1 */
{ 0x0501, 0x0008 }, /* R1281 - Audio IF 1_2 */
{ 0x0502, 0x0000 }, /* R1282 - Audio IF 1_3 */
{ 0x0503, 0x0000 }, /* R1283 - Audio IF 1_4 */
{ 0x0504, 0x0000 }, /* R1284 - Audio IF 1_5 */
{ 0x0505, 0x0300 }, /* R1285 - Audio IF 1_6 */
{ 0x0506, 0x0300 }, /* R1286 - Audio IF 1_7 */
{ 0x0507, 0x1820 }, /* R1287 - Audio IF 1_8 */
{ 0x0508, 0x1820 }, /* R1288 - Audio IF 1_9 */
{ 0x0509, 0x0000 }, /* R1289 - Audio IF 1_10 */
{ 0x050A, 0x0001 }, /* R1290 - Audio IF 1_11 */
{ 0x050B, 0x0002 }, /* R1291 - Audio IF 1_12 */
{ 0x050C, 0x0003 }, /* R1292 - Audio IF 1_13 */
{ 0x050D, 0x0004 }, /* R1293 - Audio IF 1_14 */
{ 0x050E, 0x0005 }, /* R1294 - Audio IF 1_15 */
{ 0x050F, 0x0006 }, /* R1295 - Audio IF 1_16 */
{ 0x0510, 0x0007 }, /* R1296 - Audio IF 1_17 */
{ 0x0511, 0x0000 }, /* R1297 - Audio IF 1_18 */
{ 0x0512, 0x0001 }, /* R1298 - Audio IF 1_19 */
{ 0x0513, 0x0002 }, /* R1299 - Audio IF 1_20 */
{ 0x0514, 0x0003 }, /* R1300 - Audio IF 1_21 */
{ 0x0515, 0x0004 }, /* R1301 - Audio IF 1_22 */
{ 0x0516, 0x0005 }, /* R1302 - Audio IF 1_23 */
{ 0x0517, 0x0006 }, /* R1303 - Audio IF 1_24 */
{ 0x0518, 0x0007 }, /* R1304 - Audio IF 1_25 */
{ 0x0519, 0x0000 }, /* R1305 - Audio IF 1_26 */
{ 0x051A, 0x0000 }, /* R1306 - Audio IF 1_27 */
{ 0x0540, 0x000C }, /* R1344 - Audio IF 2_1 */
{ 0x0541, 0x0008 }, /* R1345 - Audio IF 2_2 */
{ 0x0542, 0x0000 }, /* R1346 - Audio IF 2_3 */
{ 0x0543, 0x0000 }, /* R1347 - Audio IF 2_4 */
{ 0x0544, 0x0000 }, /* R1348 - Audio IF 2_5 */
{ 0x0545, 0x0300 }, /* R1349 - Audio IF 2_6 */
{ 0x0546, 0x0300 }, /* R1350 - Audio IF 2_7 */
{ 0x0547, 0x1820 }, /* R1351 - Audio IF 2_8 */
{ 0x0548, 0x1820 }, /* R1352 - Audio IF 2_9 */
{ 0x0549, 0x0000 }, /* R1353 - Audio IF 2_10 */
{ 0x054A, 0x0001 }, /* R1354 - Audio IF 2_11 */
{ 0x0551, 0x0000 }, /* R1361 - Audio IF 2_18 */
{ 0x0552, 0x0001 }, /* R1362 - Audio IF 2_19 */
{ 0x0559, 0x0000 }, /* R1369 - Audio IF 2_26 */
{ 0x055A, 0x0000 }, /* R1370 - Audio IF 2_27 */
{ 0x0580, 0x000C }, /* R1408 - Audio IF 3_1 */
{ 0x0581, 0x0008 }, /* R1409 - Audio IF 3_2 */
{ 0x0582, 0x0000 }, /* R1410 - Audio IF 3_3 */
{ 0x0583, 0x0000 }, /* R1411 - Audio IF 3_4 */
{ 0x0584, 0x0000 }, /* R1412 - Audio IF 3_5 */
{ 0x0585, 0x0300 }, /* R1413 - Audio IF 3_6 */
{ 0x0586, 0x0300 }, /* R1414 - Audio IF 3_7 */
{ 0x0587, 0x1820 }, /* R1415 - Audio IF 3_8 */
{ 0x0588, 0x1820 }, /* R1416 - Audio IF 3_9 */
{ 0x0589, 0x0000 }, /* R1417 - Audio IF 3_10 */
{ 0x058A, 0x0001 }, /* R1418 - Audio IF 3_11 */
{ 0x0591, 0x0000 }, /* R1425 - Audio IF 3_18 */
{ 0x0592, 0x0001 }, /* R1426 - Audio IF 3_19 */
{ 0x0599, 0x0000 }, /* R1433 - Audio IF 3_26 */
{ 0x059A, 0x0000 }, /* R1434 - Audio IF 3_27 */
{ 0x0640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
{ 0x0641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
{ 0x0642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
{ 0x0643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
{ 0x0644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
{ 0x0645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
{ 0x0646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
{ 0x0647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
{ 0x0648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
{ 0x0649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
{ 0x064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
{ 0x064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
{ 0x064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
{ 0x064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
{ 0x064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
{ 0x064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
{ 0x0680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
{ 0x0681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
{ 0x0682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
{ 0x0683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
{ 0x0684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
{ 0x0685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
{ 0x0686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
{ 0x0687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
{ 0x0688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
{ 0x0689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
{ 0x068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
{ 0x068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
{ 0x068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
{ 0x068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
{ 0x068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
{ 0x068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
{ 0x0690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
{ 0x0691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
{ 0x0692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
{ 0x0693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
{ 0x0694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
{ 0x0695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
{ 0x0696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
{ 0x0697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
{ 0x0698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
{ 0x0699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
{ 0x069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
{ 0x069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
{ 0x069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
{ 0x069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
{ 0x069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
{ 0x069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
{ 0x06A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
{ 0x06A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
{ 0x06A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
{ 0x06A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
{ 0x06A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
{ 0x06A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
{ 0x06A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
{ 0x06A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
{ 0x06A8, 0x0000 }, /* R1704 - OUT3RMIX Input 1 Source */
{ 0x06A9, 0x0080 }, /* R1705 - OUT3RMIX Input 1 Volume */
{ 0x06AA, 0x0000 }, /* R1706 - OUT3RMIX Input 2 Source */
{ 0x06AB, 0x0080 }, /* R1707 - OUT3RMIX Input 2 Volume */
{ 0x06AC, 0x0000 }, /* R1708 - OUT3RMIX Input 3 Source */
{ 0x06AD, 0x0080 }, /* R1709 - OUT3RMIX Input 3 Volume */
{ 0x06AE, 0x0000 }, /* R1710 - OUT3RMIX Input 4 Source */
{ 0x06AF, 0x0080 }, /* R1711 - OUT3RMIX Input 4 Volume */
{ 0x06B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
{ 0x06B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
{ 0x06B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
{ 0x06B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
{ 0x06B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
{ 0x06B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
{ 0x06B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
{ 0x06B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
{ 0x06B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
{ 0x06B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
{ 0x06BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
{ 0x06BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
{ 0x06BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
{ 0x06BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
{ 0x06BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
{ 0x06BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
{ 0x06C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
{ 0x06C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
{ 0x06C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
{ 0x06C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
{ 0x06C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
{ 0x06C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
{ 0x06C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
{ 0x06C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
{ 0x06C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
{ 0x06C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
{ 0x06CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
{ 0x06CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
{ 0x06CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
{ 0x06CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
{ 0x06CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
{ 0x06CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
{ 0x06D0, 0x0000 }, /* R1744 - OUT6LMIX Input 1 Source */
{ 0x06D1, 0x0080 }, /* R1745 - OUT6LMIX Input 1 Volume */
{ 0x06D2, 0x0000 }, /* R1746 - OUT6LMIX Input 2 Source */
{ 0x06D3, 0x0080 }, /* R1747 - OUT6LMIX Input 2 Volume */
{ 0x06D4, 0x0000 }, /* R1748 - OUT6LMIX Input 3 Source */
{ 0x06D5, 0x0080 }, /* R1749 - OUT6LMIX Input 3 Volume */
{ 0x06D6, 0x0000 }, /* R1750 - OUT6LMIX Input 4 Source */
{ 0x06D7, 0x0080 }, /* R1751 - OUT6LMIX Input 4 Volume */
{ 0x06D8, 0x0000 }, /* R1752 - OUT6RMIX Input 1 Source */
{ 0x06D9, 0x0080 }, /* R1753 - OUT6RMIX Input 1 Volume */
{ 0x06DA, 0x0000 }, /* R1754 - OUT6RMIX Input 2 Source */
{ 0x06DB, 0x0080 }, /* R1755 - OUT6RMIX Input 2 Volume */
{ 0x06DC, 0x0000 }, /* R1756 - OUT6RMIX Input 3 Source */
{ 0x06DD, 0x0080 }, /* R1757 - OUT6RMIX Input 3 Volume */
{ 0x06DE, 0x0000 }, /* R1758 - OUT6RMIX Input 4 Source */
{ 0x06DF, 0x0080 }, /* R1759 - OUT6RMIX Input 4 Volume */
{ 0x0700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
{ 0x0701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
{ 0x0702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
{ 0x0703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
{ 0x0704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
{ 0x0705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
{ 0x0706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
{ 0x0707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
{ 0x0708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
{ 0x0709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
{ 0x070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
{ 0x070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
{ 0x070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
{ 0x070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
{ 0x070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
{ 0x070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
{ 0x0710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
{ 0x0711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
{ 0x0712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
{ 0x0713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
{ 0x0714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
{ 0x0715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
{ 0x0716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
{ 0x0717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
{ 0x0718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
{ 0x0719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
{ 0x071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
{ 0x071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
{ 0x071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
{ 0x071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
{ 0x071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
{ 0x071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
{ 0x0720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
{ 0x0721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
{ 0x0722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
{ 0x0723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
{ 0x0724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
{ 0x0725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
{ 0x0726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
{ 0x0727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
{ 0x0728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
{ 0x0729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
{ 0x072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
{ 0x072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
{ 0x072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
{ 0x072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
{ 0x072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
{ 0x072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
{ 0x0730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
{ 0x0731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
{ 0x0732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
{ 0x0733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
{ 0x0734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
{ 0x0735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
{ 0x0736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
{ 0x0737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
{ 0x0738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
{ 0x0739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
{ 0x073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
{ 0x073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
{ 0x073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
{ 0x073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
{ 0x073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
{ 0x073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
{ 0x0740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
{ 0x0741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
{ 0x0742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
{ 0x0743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
{ 0x0744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
{ 0x0745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
{ 0x0746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
{ 0x0747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
{ 0x0748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
{ 0x0749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
{ 0x074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
{ 0x074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
{ 0x074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
{ 0x074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
{ 0x074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
{ 0x074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
{ 0x0780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
{ 0x0781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
{ 0x0782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
{ 0x0783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
{ 0x0784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
{ 0x0785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
{ 0x0786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
{ 0x0787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
{ 0x0788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
{ 0x0789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
{ 0x078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
{ 0x078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
{ 0x078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
{ 0x078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
{ 0x078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
{ 0x078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
{ 0x0880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
{ 0x0881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
{ 0x0882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
{ 0x0883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
{ 0x0884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
{ 0x0885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
{ 0x0886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
{ 0x0887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
{ 0x0888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
{ 0x0889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
{ 0x088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
{ 0x088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
{ 0x088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
{ 0x088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
{ 0x088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
{ 0x088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
{ 0x0890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
{ 0x0891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
{ 0x0892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
{ 0x0893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
{ 0x0894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
{ 0x0895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
{ 0x0896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
{ 0x0897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
{ 0x0898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
{ 0x0899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
{ 0x089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
{ 0x089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
{ 0x089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
{ 0x089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
{ 0x089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
{ 0x089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
{ 0x08C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
{ 0x08C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
{ 0x08C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
{ 0x08C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
{ 0x08C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
{ 0x08C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
{ 0x08C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
{ 0x08C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
{ 0x08C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
{ 0x08C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
{ 0x08CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
{ 0x08CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
{ 0x08CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
{ 0x08CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
{ 0x08CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
{ 0x08CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
{ 0x0900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
{ 0x0901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
{ 0x0902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
{ 0x0903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
{ 0x0904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
{ 0x0905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
{ 0x0906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
{ 0x0907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
{ 0x0908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
{ 0x0909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
{ 0x090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
{ 0x090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
{ 0x090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
{ 0x090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
{ 0x090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
{ 0x090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
{ 0x0910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
{ 0x0911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
{ 0x0912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
{ 0x0913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
{ 0x0914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
{ 0x0915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
{ 0x0916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
{ 0x0917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
{ 0x0918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
{ 0x0919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
{ 0x091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
{ 0x091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
{ 0x091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
{ 0x091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
{ 0x091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
{ 0x091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
{ 0x0940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
{ 0x0941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
{ 0x0942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
{ 0x0943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
{ 0x0944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
{ 0x0945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
{ 0x0946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
{ 0x0947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
{ 0x0948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
{ 0x0949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
{ 0x094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
{ 0x094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
{ 0x094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
{ 0x094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
{ 0x094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
{ 0x094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
{ 0x0950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
{ 0x0958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
{ 0x0960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
{ 0x0968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
{ 0x0970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
{ 0x0978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
{ 0x0980, 0x0000 }, /* R2432 - DSP2LMIX Input 1 Source */
{ 0x0981, 0x0080 }, /* R2433 - DSP2LMIX Input 1 Volume */
{ 0x0982, 0x0000 }, /* R2434 - DSP2LMIX Input 2 Source */
{ 0x0983, 0x0080 }, /* R2435 - DSP2LMIX Input 2 Volume */
{ 0x0984, 0x0000 }, /* R2436 - DSP2LMIX Input 3 Source */
{ 0x0985, 0x0080 }, /* R2437 - DSP2LMIX Input 3 Volume */
{ 0x0986, 0x0000 }, /* R2438 - DSP2LMIX Input 4 Source */
{ 0x0987, 0x0080 }, /* R2439 - DSP2LMIX Input 4 Volume */
{ 0x0988, 0x0000 }, /* R2440 - DSP2RMIX Input 1 Source */
{ 0x0989, 0x0080 }, /* R2441 - DSP2RMIX Input 1 Volume */
{ 0x098A, 0x0000 }, /* R2442 - DSP2RMIX Input 2 Source */
{ 0x098B, 0x0080 }, /* R2443 - DSP2RMIX Input 2 Volume */
{ 0x098C, 0x0000 }, /* R2444 - DSP2RMIX Input 3 Source */
{ 0x098D, 0x0080 }, /* R2445 - DSP2RMIX Input 3 Volume */
{ 0x098E, 0x0000 }, /* R2446 - DSP2RMIX Input 4 Source */
{ 0x098F, 0x0080 }, /* R2447 - DSP2RMIX Input 4 Volume */
{ 0x0990, 0x0000 }, /* R2448 - DSP2AUX1MIX Input 1 Source */
{ 0x0998, 0x0000 }, /* R2456 - DSP2AUX2MIX Input 1 Source */
{ 0x09A0, 0x0000 }, /* R2464 - DSP2AUX3MIX Input 1 Source */
{ 0x09A8, 0x0000 }, /* R2472 - DSP2AUX4MIX Input 1 Source */
{ 0x09B0, 0x0000 }, /* R2480 - DSP2AUX5MIX Input 1 Source */
{ 0x09B8, 0x0000 }, /* R2488 - DSP2AUX6MIX Input 1 Source */
{ 0x09C0, 0x0000 }, /* R2496 - DSP3LMIX Input 1 Source */
{ 0x09C1, 0x0080 }, /* R2497 - DSP3LMIX Input 1 Volume */
{ 0x09C2, 0x0000 }, /* R2498 - DSP3LMIX Input 2 Source */
{ 0x09C3, 0x0080 }, /* R2499 - DSP3LMIX Input 2 Volume */
{ 0x09C4, 0x0000 }, /* R2500 - DSP3LMIX Input 3 Source */
{ 0x09C5, 0x0080 }, /* R2501 - DSP3LMIX Input 3 Volume */
{ 0x09C6, 0x0000 }, /* R2502 - DSP3LMIX Input 4 Source */
{ 0x09C7, 0x0080 }, /* R2503 - DSP3LMIX Input 4 Volume */
{ 0x09C8, 0x0000 }, /* R2504 - DSP3RMIX Input 1 Source */
{ 0x09C9, 0x0080 }, /* R2505 - DSP3RMIX Input 1 Volume */
{ 0x09CA, 0x0000 }, /* R2506 - DSP3RMIX Input 2 Source */
{ 0x09CB, 0x0080 }, /* R2507 - DSP3RMIX Input 2 Volume */
{ 0x09CC, 0x0000 }, /* R2508 - DSP3RMIX Input 3 Source */
{ 0x09CD, 0x0080 }, /* R2509 - DSP3RMIX Input 3 Volume */
{ 0x09CE, 0x0000 }, /* R2510 - DSP3RMIX Input 4 Source */
{ 0x09CF, 0x0080 }, /* R2511 - DSP3RMIX Input 4 Volume */
{ 0x09D0, 0x0000 }, /* R2512 - DSP3AUX1MIX Input 1 Source */
{ 0x09D8, 0x0000 }, /* R2520 - DSP3AUX2MIX Input 1 Source */
{ 0x09E0, 0x0000 }, /* R2528 - DSP3AUX3MIX Input 1 Source */
{ 0x09E8, 0x0000 }, /* R2536 - DSP3AUX4MIX Input 1 Source */
{ 0x09F0, 0x0000 }, /* R2544 - DSP3AUX5MIX Input 1 Source */
{ 0x09F8, 0x0000 }, /* R2552 - DSP3AUX6MIX Input 1 Source */
{ 0x0A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
{ 0x0A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
{ 0x0A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
{ 0x0A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
{ 0x0B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
{ 0x0B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
{ 0x0B10, 0x0000 }, /* R2832 - ISRC1DEC3MIX Input 1 Source */
{ 0x0B18, 0x0000 }, /* R2840 - ISRC1DEC4MIX Input 1 Source */
{ 0x0B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
{ 0x0B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
{ 0x0B30, 0x0000 }, /* R2864 - ISRC1INT3MIX Input 1 Source */
{ 0x0B38, 0x0000 }, /* R2872 - ISRC1INT4MIX Input 1 Source */
{ 0x0B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
{ 0x0B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
{ 0x0B50, 0x0000 }, /* R2896 - ISRC2DEC3MIX Input 1 Source */
{ 0x0B58, 0x0000 }, /* R2904 - ISRC2DEC4MIX Input 1 Source */
{ 0x0B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
{ 0x0B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
{ 0x0B70, 0x0000 }, /* R2928 - ISRC2INT3MIX Input 1 Source */
{ 0x0B78, 0x0000 }, /* R2936 - ISRC2INT4MIX Input 1 Source */
{ 0x0C00, 0xA001 }, /* R3072 - GPIO CTRL 1 */
{ 0x0C01, 0xA001 }, /* R3073 - GPIO CTRL 2 */
{ 0x0C02, 0xA001 }, /* R3074 - GPIO CTRL 3 */
{ 0x0C03, 0xA001 }, /* R3075 - GPIO CTRL 4 */
{ 0x0C04, 0xA001 }, /* R3076 - GPIO CTRL 5 */
{ 0x0C05, 0xA001 }, /* R3077 - GPIO CTRL 6 */
{ 0x0C23, 0x4003 }, /* R3107 - Misc Pad Ctrl 1 */
{ 0x0C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 2 */
{ 0x0C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 3 */
{ 0x0C26, 0x0000 }, /* R3110 - Misc Pad Ctrl 4 */
{ 0x0C27, 0x0000 }, /* R3111 - Misc Pad Ctrl 5 */
{ 0x0C28, 0x0000 }, /* R3112 - Misc GPIO 1 */
{ 0x0D00, 0x0000 }, /* R3328 - Interrupt Status 1 */
{ 0x0D01, 0x0000 }, /* R3329 - Interrupt Status 2 */
{ 0x0D02, 0x0000 }, /* R3330 - Interrupt Status 3 */
{ 0x0D03, 0x0000 }, /* R3331 - Interrupt Status 4 */
{ 0x0D04, 0x0000 }, /* R3332 - Interrupt Raw Status 2 */
{ 0x0D05, 0x0000 }, /* R3333 - Interrupt Raw Status 3 */
{ 0x0D06, 0x0000 }, /* R3334 - Interrupt Raw Status 4 */
{ 0x0D07, 0xFFFF }, /* R3335 - Interrupt Status 1 Mask */
{ 0x0D08, 0xFFFF }, /* R3336 - Interrupt Status 2 Mask */
{ 0x0D09, 0xFFFF }, /* R3337 - Interrupt Status 3 Mask */
{ 0x0D0A, 0xFFFF }, /* R3338 - Interrupt Status 4 Mask */
{ 0x0D1F, 0x0000 }, /* R3359 - Interrupt Control */
{ 0x0D20, 0xFFFF }, /* R3360 - IRQ Debounce 1 */
{ 0x0D21, 0xFFFF }, /* R3361 - IRQ Debounce 2 */
{ 0x0E00, 0x0000 }, /* R3584 - FX_Ctrl */
{ 0x0E10, 0x6318 }, /* R3600 - EQ1_1 */
{ 0x0E11, 0x6300 }, /* R3601 - EQ1_2 */
{ 0x0E12, 0x0FC8 }, /* R3602 - EQ1_3 */
{ 0x0E13, 0x03FE }, /* R3603 - EQ1_4 */
{ 0x0E14, 0x00E0 }, /* R3604 - EQ1_5 */
{ 0x0E15, 0x1EC4 }, /* R3605 - EQ1_6 */
{ 0x0E16, 0xF136 }, /* R3606 - EQ1_7 */
{ 0x0E17, 0x0409 }, /* R3607 - EQ1_8 */
{ 0x0E18, 0x04CC }, /* R3608 - EQ1_9 */
{ 0x0E19, 0x1C9B }, /* R3609 - EQ1_10 */
{ 0x0E1A, 0xF337 }, /* R3610 - EQ1_11 */
{ 0x0E1B, 0x040B }, /* R3611 - EQ1_12 */
{ 0x0E1C, 0x0CBB }, /* R3612 - EQ1_13 */
{ 0x0E1D, 0x16F8 }, /* R3613 - EQ1_14 */
{ 0x0E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
{ 0x0E1F, 0x040A }, /* R3615 - EQ1_16 */
{ 0x0E20, 0x1F14 }, /* R3616 - EQ1_17 */
{ 0x0E21, 0x058C }, /* R3617 - EQ1_18 */
{ 0x0E22, 0x0563 }, /* R3618 - EQ1_19 */
{ 0x0E23, 0x4000 }, /* R3619 - EQ1_20 */
{ 0x0E26, 0x6318 }, /* R3622 - EQ2_1 */
{ 0x0E27, 0x6300 }, /* R3623 - EQ2_2 */
{ 0x0E28, 0x0FC8 }, /* R3624 - EQ2_3 */
{ 0x0E29, 0x03FE }, /* R3625 - EQ2_4 */
{ 0x0E2A, 0x00E0 }, /* R3626 - EQ2_5 */
{ 0x0E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
{ 0x0E2C, 0xF136 }, /* R3628 - EQ2_7 */
{ 0x0E2D, 0x0409 }, /* R3629 - EQ2_8 */
{ 0x0E2E, 0x04CC }, /* R3630 - EQ2_9 */
{ 0x0E2F, 0x1C9B }, /* R3631 - EQ2_10 */
{ 0x0E30, 0xF337 }, /* R3632 - EQ2_11 */
{ 0x0E31, 0x040B }, /* R3633 - EQ2_12 */
{ 0x0E32, 0x0CBB }, /* R3634 - EQ2_13 */
{ 0x0E33, 0x16F8 }, /* R3635 - EQ2_14 */
{ 0x0E34, 0xF7D9 }, /* R3636 - EQ2_15 */
{ 0x0E35, 0x040A }, /* R3637 - EQ2_16 */
{ 0x0E36, 0x1F14 }, /* R3638 - EQ2_17 */
{ 0x0E37, 0x058C }, /* R3639 - EQ2_18 */
{ 0x0E38, 0x0563 }, /* R3640 - EQ2_19 */
{ 0x0E39, 0x4000 }, /* R3641 - EQ2_20 */
{ 0x0E3C, 0x6318 }, /* R3644 - EQ3_1 */
{ 0x0E3D, 0x6300 }, /* R3645 - EQ3_2 */
{ 0x0E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
{ 0x0E3F, 0x03FE }, /* R3647 - EQ3_4 */
{ 0x0E40, 0x00E0 }, /* R3648 - EQ3_5 */
{ 0x0E41, 0x1EC4 }, /* R3649 - EQ3_6 */
{ 0x0E42, 0xF136 }, /* R3650 - EQ3_7 */
{ 0x0E43, 0x0409 }, /* R3651 - EQ3_8 */
{ 0x0E44, 0x04CC }, /* R3652 - EQ3_9 */
{ 0x0E45, 0x1C9B }, /* R3653 - EQ3_10 */
{ 0x0E46, 0xF337 }, /* R3654 - EQ3_11 */
{ 0x0E47, 0x040B }, /* R3655 - EQ3_12 */
{ 0x0E48, 0x0CBB }, /* R3656 - EQ3_13 */
{ 0x0E49, 0x16F8 }, /* R3657 - EQ3_14 */
{ 0x0E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
{ 0x0E4B, 0x040A }, /* R3659 - EQ3_16 */
{ 0x0E4C, 0x1F14 }, /* R3660 - EQ3_17 */
{ 0x0E4D, 0x058C }, /* R3661 - EQ3_18 */
{ 0x0E4E, 0x0563 }, /* R3662 - EQ3_19 */
{ 0x0E4F, 0x4000 }, /* R3663 - EQ3_20 */
{ 0x0E52, 0x6318 }, /* R3666 - EQ4_1 */
{ 0x0E53, 0x6300 }, /* R3667 - EQ4_2 */
{ 0x0E54, 0x0FC8 }, /* R3668 - EQ4_3 */
{ 0x0E55, 0x03FE }, /* R3669 - EQ4_4 */
{ 0x0E56, 0x00E0 }, /* R3670 - EQ4_5 */
{ 0x0E57, 0x1EC4 }, /* R3671 - EQ4_6 */
{ 0x0E58, 0xF136 }, /* R3672 - EQ4_7 */
{ 0x0E59, 0x0409 }, /* R3673 - EQ4_8 */
{ 0x0E5A, 0x04CC }, /* R3674 - EQ4_9 */
{ 0x0E5B, 0x1C9B }, /* R3675 - EQ4_10 */
{ 0x0E5C, 0xF337 }, /* R3676 - EQ4_11 */
{ 0x0E5D, 0x040B }, /* R3677 - EQ4_12 */
{ 0x0E5E, 0x0CBB }, /* R3678 - EQ4_13 */
{ 0x0E5F, 0x16F8 }, /* R3679 - EQ4_14 */
{ 0x0E60, 0xF7D9 }, /* R3680 - EQ4_15 */
{ 0x0E61, 0x040A }, /* R3681 - EQ4_16 */
{ 0x0E62, 0x1F14 }, /* R3682 - EQ4_17 */
{ 0x0E63, 0x058C }, /* R3683 - EQ4_18 */
{ 0x0E64, 0x0563 }, /* R3684 - EQ4_19 */
{ 0x0E65, 0x4000 }, /* R3685 - EQ4_20 */
{ 0x0E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
{ 0x0E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
{ 0x0E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
{ 0x0E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
{ 0x0E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
{ 0x0EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
{ 0x0EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
{ 0x0EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
{ 0x0EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
{ 0x0EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
{ 0x0EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
{ 0x0ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
{ 0x0ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x0F02, 0x0000 }, /* R3842 - DSP1 Control 2 */
{ 0x0F03, 0x0000 }, /* R3843 - DSP1 Control 3 */
{ 0x0F04, 0x0000 }, /* R3844 - DSP1 Control 4 */
{ 0x1002, 0x0000 }, /* R4098 - DSP2 Control 2 */
{ 0x1003, 0x0000 }, /* R4099 - DSP2 Control 3 */
{ 0x1004, 0x0000 }, /* R4100 - DSP2 Control 4 */
{ 0x1102, 0x0000 }, /* R4354 - DSP3 Control 2 */
{ 0x1103, 0x0000 }, /* R4355 - DSP3 Control 3 */
{ 0x1104, 0x0000 }, /* R4356 - DSP3 Control 4 */
};
/*
* Copyright (C) 2000 Jens Axboe <axboe@suse.de>
* Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
* Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*
* Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
* DVD-RAM devices.
*
* Theory of operation:
*
* At the lowest level, there is the standard driver for the CD/DVD device,
* typically ide-cd.c or sr.c. This driver can handle read and write requests,
* but it doesn't know anything about the special restrictions that apply to
* packet writing. One restriction is that write requests must be aligned to
* packet boundaries on the physical media, and the size of a write request
* must be equal to the packet size. Another restriction is that a
* GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
* command, if the previous command was a write.
*
* The purpose of the packet writing driver is to hide these restrictions from
* higher layers, such as file systems, and present a block device that can be
* randomly read and written using 2kB-sized blocks.
*
* The lowest layer in the packet writing driver is the packet I/O scheduler.
* Its data is defined by the struct packet_iosched and includes two bio
* queues with pending read and write requests. These queues are processed
* by the pkt_iosched_process_queue() function. The write requests in this
* queue are already properly aligned and sized. This layer is responsible for
* issuing the flush cache commands and scheduling the I/O in a good order.
*
* The next layer transforms unaligned write requests to aligned writes. This
* transformation requires reading missing pieces of data from the underlying
* block device, assembling the pieces to full packets and queuing them to the
* packet I/O scheduler.
*
* At the top layer there is a custom make_request_fn function that forwards
* read requests directly to the iosched queue and puts write requests in the
* unaligned write queue. A kernel thread performs the necessary read
* gathering to convert the unaligned writes to aligned writes and then feeds
* them to the packet I/O scheduler.
*
*************************************************************************/
#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <asm/uaccess.h>
#define DRIVER_NAME	"pktcdvd"

/* DPRINTK: general debug output, compiled out unless PACKET_DEBUG is set */
#if PACKET_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/* VPRINTK: verbose per-bio tracing, needs PACKET_DEBUG > 1 */
#if PACKET_DEBUG > 1
#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define VPRINTK(fmt, args...)
#endif

#define MAX_SPEED 0xffff

/*
 * Map a sector to the start sector of its packet-sized zone.
 * Assumes settings.size is a power of two (checked at setup time).
 */
#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))

static DEFINE_MUTEX(pktcdvd_mutex);
/* one slot per possible pktcdvd minor */
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
/* module-wide defaults for the per-device congestion watermarks */
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;
/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);
/*
 * Allocate, initialize and register a pktcdvd kobject under @parent.
 * Returns the new object, or NULL on allocation/registration failure.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *pkobj = kzalloc(sizeof(*pkobj), GFP_KERNEL);

	if (!pkobj)
		return NULL;

	pkobj->pd = pd;
	if (kobject_init_and_add(&pkobj->kobj, ktype, parent, "%s", name)) {
		/* after init, a put (which calls ktype->release) frees us */
		kobject_put(&pkobj->kobj);
		return NULL;
	}
	kobject_uevent(&pkobj->kobj, KOBJ_ADD);
	return pkobj;
}
/*
 * Drop the reference on a pktcdvd kobject; tolerates NULL.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (!p)
		return;
	kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 * Called by the kobject core when the last reference is dropped;
 * frees the container allocated in pkt_kobj_create().
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}
/**********************************************************
*
* sysfs interface for pktcdvd
* by (C) 2006 Thomas Maier <balagi@justmail.de>
*
**********************************************************/
/* shorthand for declaring a bare sysfs attribute (no show/store of its own;
 * dispatch happens by name in kobj_pkt_show()/kobj_pkt_store()) */
#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/

/* "reset" is write-only; the counters are read-only */
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

/* attributes of the per-device "stat" directory */
static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

/* congestion marks are tunable (0644); queue size is read-only */
DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

/* attributes of the per-device "write_queue" directory */
static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};
/*
 * show() for all stat/ and write_queue/ attributes; dispatches on the
 * attribute name.  Counters are read without locking (monotonic, long-sized);
 * queue size and congestion marks are read under pd->lock because they are
 * updated concurrently by the writer thread and kobj_pkt_store().
 */
static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		/* stats are kept in 512-byte sectors; >> 1 converts to kB */
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	/* unknown attribute names fall through and return 0 (empty file) */
	return n;
}
/*
 * Sanitize the write-congestion watermarks.  Either both end up disabled
 * (-1) when the "on" mark is non-positive, or they are clamped so that
 * 500 <= *hi <= 1000000 and 100 <= *lo <= *hi - 100.
 */
static void init_write_congestion_marks(int* lo, int* hi)
{
	int high = *hi;
	int low = *lo;

	if (high <= 0) {
		/* non-positive "on" mark disables congestion handling */
		*hi = -1;
		*lo = -1;
		return;
	}

	/* clamp the high mark into [500, 1000000] */
	if (high < 500)
		high = 500;
	if (high > 1000000)
		high = 1000000;

	if (low <= 0) {
		/* unset low mark: keep a fixed gap below the high mark */
		low = high - 100;
	} else {
		if (low > high - 100)
			low = high - 100;
		if (low < 100)
			low = 100;
	}

	*hi = high;
	*lo = low;
}
/*
 * store() for the writable attributes: "reset" clears the statistics,
 * the two congestion attributes update one watermark and then re-run
 * init_write_congestion_marks() so the pair stays consistent.
 * Unrecognized names and unparsable input are silently accepted
 * (the full length is always returned).
 */
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;
	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		/* any non-empty write resets all counters */
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		&& sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		&& sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
/* common show/store dispatcher shared by both attribute directories */
static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};

/* kobj_type for the per-device "stat" directory */
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};

/* kobj_type for the per-device "write_queue" directory */
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};
/*
 * Create the per-device sysfs entries: the class device itself plus the
 * "stat" and "write_queue" kobject directories beneath it.  On any
 * failure pd->dev / the kobj pointers are simply left NULL; the driver
 * works without its sysfs interface.
 */
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0),
					NULL, "%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (!pd->dev)
		return;

	pd->kobj_stat = pkt_kobj_create(pd, "stat", &pd->dev->kobj,
					&kobj_pkt_type_stat);
	pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue", &pd->dev->kobj,
					  &kobj_pkt_type_wqueue);
}
/*
 * Tear down the per-device sysfs entries created by pkt_sysfs_dev_new().
 *
 * pd->dev may be NULL: pkt_sysfs_dev_new() clears it when device_create()
 * fails.  Guard against that before calling device_unregister(), which
 * does not tolerate a NULL device.
 */
static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd && pd->dev)
		device_unregister(pd->dev);
}
/********************************************************************
/sys/class/pktcdvd/
add map block device
remove unmap packet dev
device_map show mappings
*******************************************************************/
/*
 * Release callback for the dynamically allocated pktcdvd class
 * (kzalloc'ed in pkt_sysfs_init()); frees the struct class itself.
 */
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
/*
 * show() for /sys/class/pktcdvd/device_map: one line per registered
 * writer, "name pkt_major:pkt_minor cd_major:cd_minor".  The device
 * table is walked under ctl_mutex to keep it stable.
 */
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];

		if (pd)
			n += sprintf(data + n, "%s %u:%u %u:%u\n",
				     pd->name,
				     MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
				     MAJOR(pd->bdev->bd_dev),
				     MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
/*
 * store() for /sys/class/pktcdvd/add: parse "major:minor" of a CD/DVD
 * block device and map a packet device on top of it.
 */
static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) != 2)
		return -EINVAL;

	/* pkt_setup_dev() expects caller to hold reference to self */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	pkt_setup_dev(MKDEV(major, minor), NULL);
	module_put(THIS_MODULE);

	return count;
}
/*
 * store() for /sys/class/pktcdvd/remove: parse "major:minor" of a
 * packet device and unmap it.
 */
static ssize_t class_pktcdvd_store_remove(struct class *c,
					  struct class_attribute *attr,
					  const char *buf,
					  size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) != 2)
		return -EINVAL;

	pkt_remove_dev(MKDEV(major, minor));
	return count;
}
/* class-level control files: add/remove are root-writable, map is readable */
static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add,		0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove,		0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map,	0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};
/*
 * Register the /sys/class/pktcdvd class.  The class is heap-allocated
 * (rather than static) so it can be freed by class_pktcdvd_release()
 * once the last reference goes away.  Returns 0 or a negative errno.
 */
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		/* registration failed before any reference was handed out,
		 * so free directly instead of relying on class_release */
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		printk(DRIVER_NAME": failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}
/*
 * Unregister and drop the pktcdvd class; safe to call when the class
 * was never created.
 */
static void pkt_sysfs_cleanup(void)
{
	struct class *cls = class_pktcdvd;

	if (cls)
		class_destroy(cls);
	class_pktcdvd = NULL;
}
/********************************************************************
entries in debugfs
/sys/kernel/debug/pktcdvd[0-7]/
info
*******************************************************************/
/*
 * seq_file show() for the debugfs "info" file; reuses the /proc
 * formatting routine so both views stay identical.
 */
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}
/*
 * open() for the debugfs "info" file; i_private carries the
 * pktcdvd_device pointer stashed by debugfs_create_file().
 */
static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}
/* file operations for /sys/kernel/debug/pktcdvd*/ /* info (single_open seq_file) */
static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
/*
 * Create /sys/kernel/debug/pktcdvd<N>/info for this device.  Failures
 * leave the corresponding pointers NULL; debugfs is purely optional.
 */
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	struct dentry *dir, *info;

	if (!pkt_debugfs_root)
		return;

	pd->dfs_f_info = NULL;
	dir = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (IS_ERR(dir)) {
		pd->dfs_d_root = NULL;
		return;
	}
	pd->dfs_d_root = dir;

	info = debugfs_create_file("info", S_IRUGO, dir, pd, &debug_fops);
	pd->dfs_f_info = IS_ERR(info) ? NULL : info;
}
/*
 * Remove this device's debugfs entries.  debugfs_remove() is documented
 * as a no-op for NULL dentries, so the previous per-pointer guards were
 * redundant and have been dropped.
 */
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}
/*
 * Create the driver-wide debugfs root (/sys/kernel/debug/pktcdvd);
 * on failure the root stays NULL and all per-device debugfs setup
 * becomes a no-op.
 */
static void pkt_debugfs_init(void)
{
	struct dentry *root = debugfs_create_dir(DRIVER_NAME, NULL);

	pkt_debugfs_root = IS_ERR(root) ? NULL : root;
}
/*
 * Remove the driver-wide debugfs root, if it was ever created.
 */
static void pkt_debugfs_cleanup(void)
{
	if (pkt_debugfs_root) {
		debugfs_remove(pkt_debugfs_root);
		pkt_debugfs_root = NULL;
	}
}
/* ----------------------------------------------------------*/

/*
 * Account one completed bio.  When the last in-flight bio finishes,
 * poke the iosched and wake the per-device worker thread so it can
 * submit the next batch.
 */
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		VPRINTK(DRIVER_NAME": queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Destructor for bios built by pkt_bio_alloc(): both the iovec array
 * and the bio itself were plain kmalloc/kcalloc allocations.
 */
static void pkt_bio_destructor(struct bio *bio)
{
	kfree(bio->bi_io_vec);
	kfree(bio);
}
/*
 * Hand-roll a bio with room for @nr_iovecs segments, outside the normal
 * bio_set pools.  pkt_bio_destructor() undoes both allocations.
 * Returns NULL on allocation failure.
 */
static struct bio *pkt_bio_alloc(int nr_iovecs)
{
	struct bio *bio;
	struct bio_vec *bvl;

	bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
	if (bio == NULL)
		return NULL;
	bio_init(bio);

	bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
	if (bvl == NULL) {
		kfree(bio);
		return NULL;
	}

	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	bio->bi_destructor = pkt_bio_destructor;
	return bio;
}
/*
 * Allocate a packet_data struct
 *
 * Builds one write bio sized for @frames, the backing pages for the
 * packet payload, and one single-segment read bio per frame.
 * Returns NULL on failure.  Note the error labels deliberately fall
 * through: no_rd_bio frees any read bios, then no_page frees the pages,
 * then the write bio, then the struct itself.
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = pkt_bio_alloc(frames);
	if (!pkt->w_bio)
		goto no_bio;

	/* FRAMES_PER_PAGE frames share one zeroed page */
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	/* one read bio per frame for gathering missing data */
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt_bio_alloc(1);
		if (!bio)
			goto no_rd_bio;
		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	/* free whatever read bios were allocated (kzalloc left the rest NULL) */
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	/* falls through from no_rd_bio; also frees partially allocated pages */
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}
/*
 * Free a packet_data struct
 *
 * Exact inverse of pkt_alloc_packet_data(): per-frame read bios,
 * payload pages, write bio, then the struct itself.
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int f;

	for (f = 0; f < pkt->frames; f++) {
		if (pkt->r_bios[f])
			bio_put(pkt->r_bios[f]);
	}
	for (f = 0; f < pkt->frames / FRAMES_PER_PAGE; f++)
		__free_page(pkt->pages[f]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}
/*
 * Free every packet on the free list.  Must only be called once no
 * packets are active (checked with the BUG_ON).
 */
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	/* the entries were freed without list_del, so reset the head */
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}
/*
 * Populate the free list with @nr_packets packet_data structs sized for
 * the current packet length (settings.size is in sectors, >> 2 gives
 * 2kB frames).  Returns 1 on success; on allocation failure everything
 * already allocated is released and 0 is returned.
 */
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	int id;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	for (id = nr_packets; id > 0; id--) {
		struct packet_data *pkt =
			pkt_alloc_packet_data(pd->settings.size >> 2);

		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = id;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	}
	return 1;
}
/*
 * In-order successor of @node in the bio queue tree, or NULL at the end.
 */
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *next = rb_next(&node->rb_node);

	return next ? rb_entry(next, struct pkt_rb_node, rb_node) : NULL;
}
/*
 * Remove @node from the bio queue tree, return it to the mempool and
 * keep the queue-size accounting consistent.  Caller holds pd->lock.
 */
static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 * Returns NULL if the tree is empty or every node starts before @s.
 * Caller holds pd->lock.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		/* empty tree implies an empty queue */
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	/* descend until we fall off the tree; tmp ends at the last node seen */
	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	/* the search may land on the predecessor; step forward if so */
	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}
/*
 * Insert a node into the pd->bio_queue rb tree.
 * Keyed on the bio's starting sector; duplicates go to the right so
 * insertion order is preserved among equal keys.  Caller holds pd->lock.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	/* standard rbtree insertion: walk down to a NULL child slot */
	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
* Send a packet_command to the underlying block device and
* wait for completion.
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
struct request_queue *q = bdev_get_queue(pd->bdev);
struct request *rq;
int ret = 0;
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
WRITE : READ, __GFP_WAIT);
if (cgc->buflen) {
if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
goto out;
}
rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
rq->timeout = 60*HZ;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
if (cgc->quiet)
rq->cmd_flags |= REQ_QUIET;
blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
if (rq->errors)
ret = -EIO;
out:
blk_put_request(rq);
return ret;
}
/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 *
 * Prints the failed packet command bytes followed by the decoded sense
 * key (index into info[], valid for keys 0..8), or "no sense" when the
 * command carried no sense buffer.
 */
static void pkt_dump_sense(struct packet_command *cgc)
{
	static char *info[9] = { "No sense", "Recovered error", "Not ready",
				 "Medium error", "Hardware error", "Illegal request",
				 "Unit attention", "Data protect", "Blank check" };
	int i;
	struct request_sense *sense = cgc->sense;

	/* dump the raw CDB first; the printk is continued piecewise */
	printk(DRIVER_NAME":");
	for (i = 0; i < CDROM_PACKET_SIZE; i++)
		printk(" %02x", cgc->cmd[i]);
	printk(" - ");

	if (sense == NULL) {
		printk("no sense\n");
		return;
	}

	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);

	if (sense->sense_key > 8) {
		/* keys 9..15 are reserved/vendor-specific, no text for them */
		printk(" (INVALID)\n");
		return;
	}

	printk(" (%s)\n", info[sense->sense_key]);
}
/*
 * flush the drive cache to media
 *
 * Issues GPCMD_FLUSH_CACHE synchronously.  The IMMED bit is deliberately
 * left clear: an immediate return would allow a faster close, but waiting
 * for the flush to complete is safer.
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 *
 * Issues GPCMD_SET_SPEED with big-endian 16-bit read and write speed
 * fields; dumps the sense data when the drive rejects the command.
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(&cgc);

	return ret;
}
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	struct bio_list *queue;

	queue = (bio_data_dir(bio) == READ) ? &pd->iosched.read_queue
					    : &pd->iosched.write_queue;

	spin_lock(&pd->iosched.lock);
	bio_list_add(queue, bio);
	spin_unlock(&pd->iosched.lock);

	/* tell the worker thread there is new work */
	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{

	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			/* sequential with the last write: no seek needed */
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": write, waiting\n");
					break;
				}
				/* flush before switching from write to read */
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			/* track read volume (in kB) since the last write */
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
		}
		/* long read streaks get max read speed; otherwise match speeds */
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 *
 * Decide whether whole-packet writes fit in the queue's segment limit
 * as one 2kB frame per segment (fast path), need page-merged segments
 * (extra memory copies), or cannot work at all (-EIO).
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/* one segment per frame is within the limit */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	}

	if ((pd->settings.size << 9) / PAGE_SIZE
	    <= queue_max_segments(q)) {
		/* page-sized segments fit; pay for it with extra copies */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	}

	printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
	return -EIO;
}
/*
 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 *
 * Starts at segment @seg, offset @offs, and may span multiple bio
 * segments.  The destination page must be a lowmem page (accessed via
 * page_address()); only the source side is kmapped.
 */
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
{
	unsigned int copy_size = CD_FRAMESIZE;

	while (copy_size > 0) {
		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
		void *vfrom = kmap_atomic(src_bvl->bv_page) +
			src_bvl->bv_offset + offs;
		void *vto = page_address(dst_page) + dst_offs;
		int len = min_t(int, copy_size, src_bvl->bv_len - offs);

		BUG_ON(len < 0);
		memcpy(vto, vfrom, len);
		/* kunmap_atomic accepts any address within the mapped page */
		kunmap_atomic(vfrom);

		/* advance to the next source segment; offset only applies once */
		seg++;
		offs = 0;
		dst_offs += len;
		copy_size -= len;
	}
}
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			/* frame lives elsewhere: copy it in and repoint the bvec */
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			/* already our page; it must sit at the expected offset */
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}
/*
 * Completion handler for the per-frame gather reads.  Records errors,
 * and when the last outstanding read of the packet finishes, kicks the
 * packet state machine via run_sm and the worker wait queue.
 */
static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}
/*
 * Completion handler for the packet write bio.  Updates statistics and
 * unconditionally advances the packet state machine (write errors are
 * handled by the state machine, not here).
 */
static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 *
 * Marks the frames covered by queued write bios as already written,
 * then (unless the packet's cached payload is still valid) issues one
 * single-page read bio per missing frame.  io_wait counts the reads in
 * flight; pkt_end_io_read() advances the state machine when it drops
 * to zero.
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		/* translate the bio's span into frame indices within the packet */
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		/* payload from a previous write to this zone is still good */
		VPRINTK("pkt_gather_data: zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		struct bio_vec *vec;

		int p, offset;
		if (written[f])
			continue;
		/* re-init the preallocated read bio for this frame */
		bio = pkt->r_bios[f];
		vec = bio->bi_io_vec;
		bio_init(bio);
		bio->bi_max_vecs = 1;
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;
		bio->bi_io_vec = vec;
		bio->bi_destructor = pkt_bio_destructor;

		/* frame f's slot within the shared payload pages */
		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 *
 * The second half of the condition triggers on the list's last element
 * (its .next points back at the head), which - given how
 * pkt_put_packet_data() inserts - is the least recently used packet.
 * The free list must be non-empty; otherwise we BUG().
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			/* reused for a different zone: cached payload is stale */
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}
/*
 * Return a packet to the free list.  Packets with valid cached data are
 * placed at the head so their cache survives longest; invalidated packets
 * go to the tail, where pkt_get_packet_data recycles from first.
 */
static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct list_head *free_list = &pd->cdrw.pkt_free_list;

	if (pkt->cache_valid)
		list_add(&pkt->list, free_list);
	else
		list_add_tail(&pkt->list, free_list);
}
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 *
	 * Relocation would require a filesystem interface (a
	 * relocate_blocks-style super_block operation) to move the failed
	 * zone to a fresh block, after which the write bio could be
	 * re-issued at the new sector.  No such interface exists, so
	 * recovery is always refused and the packet is failed.
	 *
	 * (A bit-rotted sketch of such an implementation, referencing
	 * long-removed request/bio fields, used to live here under
	 * "#if 0" and has been dropped.)
	 */
	return 0;
}
/*
 * Set the packet's state, logging the transition when PACKET_DEBUG > 1.
 */
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	/* Names must stay in sync with enum packet_data_state ordering */
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	VPRINTK("handle_queue\n");

	atomic_set(&pd->scan_queue, 0);

	/* Nothing can be started without a free packet */
	if (list_empty(&pd->cdrw.pkt_free_list)) {
		VPRINTK("handle_queue: no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	/* Start the scan at current_sector to keep writes roughly sequential */
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = ZONE(bio->bi_sector, pd);
		/* Skip zones that already have an active packet */
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			/* Wrap around to the lowest-sector queued bio */
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		/* Full circle: every queued zone is already active */
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		VPRINTK("handle_queue: no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	/* settings.size is in 512-byte sectors, frames are 2KB (>> 2) */
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		VPRINTK("pkt_handle_queue: found zone=%llx\n",
			(unsigned long long)ZONE(bio->bi_sector, pd));
		if (ZONE(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
		&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	/* Give the packet some time to fill up before forcing the write */
	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct bio *bio;
	int f;
	int frames_write;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	/* Default every frame to the packet's own pages (read-in data) */
	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
	}

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	frames_write = 0;
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int segment = bio->bi_idx;
		int src_offs = 0;
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++) {
			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);

			/* Advance to the segment containing src_offs */
			while (src_offs >= src_bvl->bv_len) {
				src_offs -= src_bvl->bv_len;
				segment++;
				BUG_ON(segment >= bio->bi_vcnt);
				src_bvl = bio_iovec_idx(bio, segment);
			}

			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
				/* Frame fits in one segment: reference it directly */
				bvec[f].bv_page = src_bvl->bv_page;
				bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
			} else {
				/* Frame straddles segments: copy into packet pages */
				pkt_copy_bio_data(bio, segment, src_offs,
						  bvec[f].bv_page, bvec[f].bv_offset);
			}
			src_offs += CD_FRAMESIZE;
			frames_write++;
		}
	}
	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
		frames_write, (unsigned long long)pkt->sector);
	BUG_ON(frames_write != pkt->write_size);

	/*
	 * Keep a private copy of the data when segments must be merged or
	 * the packet mixes read-in and written frames, so the cache stays
	 * valid; otherwise the write references caller pages and the cache
	 * is invalidated.
	 */
	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	bio_init(pkt->w_bio);
	pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;
	pkt->w_bio->bi_io_vec = bvec;
	pkt->w_bio->bi_destructor = pkt_bio_destructor;
	for (f = 0; f < pkt->frames; f++)
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);

	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
struct bio *bio;
if (!uptodate)
pkt->cache_valid = 0;
/* Finish all bios corresponding to this packet */
while ((bio = bio_list_pop(&pkt->orig_bios)))
bio_endio(bio, uptodate ? 0 : -EIO);
}
/*
 * Advance one packet through its life cycle:
 *
 *   WAITING -> READ_WAIT -> WRITE_WAIT -> FINISHED
 *                  \            /
 *                   RECOVERY (on I/O error)
 *
 * Returns without changing state whenever the packet must wait for more
 * data or for outstanding I/O to complete.
 */
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	VPRINTK("run_state_machine: pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			/* Wait while not full and the fill timeout hasn't expired */
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			/* Reads for missing frames still outstanding */
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			/* The packet write bio has not completed yet */
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			/* pkt_start_recovery currently always fails (see FIXME) */
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				VPRINTK("No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
/*
 * Run the state machine for every packet that requested it, then recycle
 * finished packets back onto the free list.
 */
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	VPRINTK("pkt_handle_packets\n");

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			/* A packet was freed; new work may now be startable */
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
struct packet_data *pkt;
int i;
for (i = 0; i < PACKET_NUM_STATES; i++)
states[i] = 0;
spin_lock(&pd->cdrw.active_list_lock);
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
states[pkt->state]++;
}
spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2], states[3],
					states[4], states[5]);
			}

			/* Sleep no longer than the shortest packet fill timeout */
			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			VPRINTK("kcdrwd: sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			VPRINTK("kcdrwd: wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			/* Charge elapsed time against each waiting packet; a
			 * packet whose timeout expired gets its state machine run */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
/*
 * Log the active write settings.  The three printk calls continue a
 * single output line.
 */
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
	/* settings.size is in 512-byte units; >> 2 converts to 2KB blocks */
	printk("%u blocks, ", pd->settings.size >> 2);
	/* block_mode 8 maps to Mode-1 (presumably PACKET_BLOCK_MODE1 -- confirm) */
	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
}
/*
 * Issue a MODE SENSE(10) for the given page, fetching up to cgc->buflen
 * bytes into the command's buffer.
 */
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	unsigned char *cmd = cgc->cmd;

	memset(cmd, 0, sizeof(cgc->cmd));
	cmd[0] = GPCMD_MODE_SENSE_10;
	cmd[2] = page_code | (page_control << 6);
	/* allocation length, big-endian */
	cmd[7] = cgc->buflen >> 8;
	cmd[8] = cgc->buflen & 0xff;

	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}
/*
 * Issue a MODE SELECT(10), sending cgc->buflen bytes of mode parameters
 * from the command's buffer to the drive.
 */
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	unsigned char *cmd = cgc->cmd;

	memset(cmd, 0, sizeof(cgc->cmd));
	/* clear the mode data length field of the parameter list */
	memset(cgc->buffer, 0, 2);
	cmd[0] = GPCMD_MODE_SELECT_10;
	cmd[1] = 0x10;		/* PF */
	/* parameter list length, big-endian */
	cmd[7] = cgc->buflen >> 8;
	cmd[8] = cgc->buflen & 0xff;

	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
/*
 * Read the disc information block.  Issued twice: first just the 2-byte
 * length header, then again with the length the drive reports (capped at
 * the size of our structure).
 */
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Read the track (RZone) information for the given track.  Like
 * pkt_get_disc_info, the query is issued twice: first with a fixed
 * 8-byte buffer, then with the length the drive reports (capped at the
 * size of our structure).
 */
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	/* track number, big-endian */
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Determine the last written address on the disc, in 2KB block units.
 *
 * Queries the disc and last-track information; if the track's "last
 * recorded address" field is valid it is used directly, otherwise the
 * value is derived from track start + size minus the remaining free
 * blocks.
 *
 * Returns 0 on success with *last_written set, or the error from the
 * underlying packet commands.
 */
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						   long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;	/* was "= -1": a dead store, ret is always assigned below */

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		/* NOTE(review): the +7 presumably accounts for link blocks;
		 * confirm against MMC track reserve semantics. */
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
/*
 * write mode select package based on pd->settings
 *
 * Reads the drive's write-parameters mode page, fills in the packet
 * writing fields from pd->settings, and sends it back with MODE SELECT.
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/* total mode data length and block descriptor length, both
	 * big-endian, from the mode parameter header */
	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	/* packet size in 2KB frames (settings.size is 512-byte sectors) */
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	/* The track is always writable on DVD+RW/DVD-RAM */
	if (pd->mmc3_profile == 0x1a || pd->mmc3_profile == 0x12)
		return 1;

	/* Only fixed-size packet tracks are supported */
	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji: every rt/blank combination
	 * except a reserved blank track.
	 */
	if ((ti->rt == 0 && (ti->blank == 0 || ti->blank == 1)) ||
	    (ti->rt == 1 && ti->blank == 0))
		return 1;

	printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	unsigned int profile = pd->mmc3_profile;

	/* DVD+RW, DVD-RW and DVD-RAM media are accepted as-is */
	if (profile == 0x1a || profile == 0x13 || profile == 0x12)
		return 1;

	/* Only CD-RW (0x0a) and drives without MMC3 support (0xffff)
	 * proceed to the detailed checks below; reject everything else */
	if (profile != 0x0a && profile != 0xffff) {
		VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", profile);
		return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		printk(DRIVER_NAME": Unknown disc. No track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		printk(DRIVER_NAME": Disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		printk(DRIVER_NAME": Can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
/*
 * Probe the inserted media and fill in pd->settings (packet size, track
 * and block mode, NWA/LRA) from the disc and track information.
 * Returns 0 on success or a negative errno if the media is unusable.
 */
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	/* GET CONFIGURATION yields the current MMC3 profile (media type) */
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		/* fixed: use the DRIVER_NAME prefix like every other message */
		printk(DRIVER_NAME": failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		printk(DRIVER_NAME": failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		printk(DRIVER_NAME": can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		printk(DRIVER_NAME": detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		printk(DRIVER_NAME": packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
	case PACKET_MODE1:
		pd->settings.block_mode = PACKET_BLOCK_MODE1;
		break;
	case PACKET_MODE2:
		pd->settings.block_mode = PACKET_BLOCK_MODE2;
		break;
	default:
		printk(DRIVER_NAME": unknown data mode\n");
		return -EROFS;
	}
	return 0;
}
/*
 * enable/disable write caching on drive
 *
 * Reads the caching mode page, toggles the write-cache bit according to
 * "set", and writes the page back.  Returns 0 on success or the error
 * from the mode sense/select commands.
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	/* set/clear bit 2 of the page -- presumably the WCE bit of the
	 * caching mode page; confirm against the MMC spec */
	buf[pd->mode_offset + 10] |= (!!set << 2);

	/* send back exactly the mode data length the drive reported */
	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		printk(DRIVER_NAME": write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (set)		/* ret == 0 here; the old "!ret &&" was redundant */
		printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
	return ret;
}
/*
 * Lock or unlock the drive door via PREVENT ALLOW MEDIUM REMOVAL.
 */
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = !!lockflag;	/* 1 = prevent removal, 0 = allow */
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	/* cap_buf points at the capabilities page body, past header + descriptors */
	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		/* Retry with a length computed from the page length byte.
		 * NOTE(review): cap_buf[1] is read even though the first
		 * MODE SENSE failed, so it may hold stale/zero data --
		 * confirm this matches the intended retry semantics. */
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	/* big-endian 16-bit speed value */
	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
/* These tables from cdrecord - I don't have orange book */
/* Indexed by the 4-bit speed code from the ATIP A1 field (see
 * pkt_media_speed); value 0 means unknown/invalid. */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
					      unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	/* First read only the 2-byte ATIP length header */
	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	/* Now fetch the full ATIP data with the reported length */
	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		printk(DRIVER_NAME": Disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
			return 1;
	}
	/* a zero table entry means the speed code was unrecognized */
	if (*speed) {
		printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
		return 0;
	} else {
		printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
		return 1;
	}
}
/*
 * Ask the drive to run Optimum Power Calibration (OPC) before writing.
 * Errors are dumped to the log and returned to the caller.
 */
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	VPRINTK(DRIVER_NAME": Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;	/* calibration can take a while */
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;

	ret = pkt_generic_packet(pd, &cgc);
	if (ret)
		pkt_dump_sense(&cgc);
	return ret;
}
/*
 * Prepare the drive for writing: probe the media, program write settings
 * and caching, select read/write speed, and perform OPC.
 * Returns 0 on success or a negative errno.
 */
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
		return -EIO;
	}

	/* best effort; errors are reported inside pkt_write_caching */
	pkt_write_caching(pd, USE_WCACHING);

	/* fall back to 16x (in 177 kB/s CD units) if the drive won't say */
	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
			break;
		default:
			/* CD media: clamp drive speed to what the disc supports */
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	/* OPC failure is logged but not treated as fatal */
	if ((ret = pkt_perform_opc(pd))) {
		DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
	}

	return 0;
}
/*
 * called at open time.
 *
 * Re-opens the underlying cdrom device, determines the disc capacity,
 * and (for writers) prepares the drive and allocates packet buffers.
 * Returns 0 on success or a negative errno; on failure the underlying
 * device reference is dropped again.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		printk(DRIVER_NAME": pkt_get_last_written failed\n");
		goto out_putdev;
	}

	/* lba is in 2KB frames: << 2 gives 512B sectors, << 11 gives bytes */
	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			printk(DRIVER_NAME": not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		/* lba (2KB frames) << 1 converts to kB */
		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);

	/* unlock the door and restore maximum drive speed */
	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
/*
 * Map a pktcdvd minor number to its device; NULL when the minor is out
 * of range or the slot is unregistered.
 */
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	return (dev_minor < MAX_WRITERS) ? pkt_devs[dev_minor] : NULL;
}
/*
 * Block device open.  The first opener performs the full device setup
 * via pkt_open_dev; later openers only bump the refcount.  A writer may
 * only share the device if it was set up writable.
 */
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	VPRINTK(DRIVER_NAME": entering open\n");

	/* lock order: pktcdvd_mutex then ctl_mutex (matches pkt_close) */
	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		/* Already open: refuse a writer on a read-only setup */
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
/*
 * Block device release.  The last closer tears the device down,
 * flushing the drive cache if the device was opened writable.
 */
static int pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;
	int ret = 0;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
/*
 * Completion handler for cloned READ bios: drop the clone, complete the
 * original bio with the clone's status, and free the stacked data.
 */
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	bio_put(bio);
	bio_endio(psd->bio, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}
/*
 * pkt_make_request - entry point for every bio submitted to a pktcdvd
 * device.
 *
 * READs are cloned and forwarded straight to the underlying device with
 * a private completion callback.  WRITEs must be CD_FRAMESIZE-aligned;
 * they are either appended to an already-active packet for the same
 * zone, or inserted into the rb-tree work queue for the kcdrwd worker
 * thread to gather into full packets.
 */
static void pkt_make_request(struct request_queue *q, struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
sector_t zone;
struct packet_data *pkt;
int was_empty, blocked_bio;
struct pkt_rb_node *node;
pd = q->queuedata;
if (!pd) {
printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
goto end_io;
}
/*
 * Clone READ bios so we can have our own bi_end_io callback.
 * GFP_NOIO mempool allocations below will block rather than fail.
 */
if (bio_data_dir(bio) == READ) {
struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
psd->pd = pd;
psd->bio = bio;
/* Redirect the clone to the real device; the original bio is
 * completed from pkt_end_io_read_cloned(). */
cloned_bio->bi_bdev = pd->bdev;
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio->bi_size >> 9;
pkt_queue_bio(pd, cloned_bio);
return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
pd->name, (unsigned long long)bio->bi_sector);
goto end_io;
}
/* Writes must be non-empty and a whole number of CD frames. */
if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
printk(DRIVER_NAME": wrong bio size\n");
goto end_io;
}
blk_queue_bounce(q, &bio);
zone = ZONE(bio->bi_sector, pd);
VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_sector,
(unsigned long long)(bio->bi_sector + bio_sectors(bio)));
/* Check if we have to split the bio.  pkt_merge_bvec() caps bios at
 * PAGE_SIZE, so a bio can straddle at most one zone boundary (the
 * BUG_ON below asserts exactly that); the two halves are handled by
 * one level of recursion each. */
{
struct bio_pair *bp;
sector_t last_zone;
int first_sectors;
last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
bp = bio_split(bio, first_sectors);
BUG_ON(!bp);
pkt_make_request(q, &bp->bio1);
pkt_make_request(q, &bp->bio2);
bio_pair_release(bp);
return;
}
}
/*
 * If we find a matching packet in state WAITING or READ_WAIT, we can
 * just append this bio to that packet.
 */
spin_lock(&pd->cdrw.active_list_lock);
blocked_bio = 0;
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (pkt->sector == zone) {
spin_lock(&pkt->lock);
if ((pkt->state == PACKET_WAITING_STATE) ||
(pkt->state == PACKET_READ_WAIT_STATE)) {
bio_list_add(&pkt->orig_bios, bio);
pkt->write_size += bio->bi_size / CD_FRAMESIZE;
/* Packet became full while still waiting: kick the
 * state machine so it gets written out. */
if ((pkt->write_size >= pkt->frames) &&
(pkt->state == PACKET_WAITING_STATE)) {
atomic_inc(&pkt->run_sm);
wake_up(&pd->wqueue);
}
spin_unlock(&pkt->lock);
spin_unlock(&pd->cdrw.active_list_lock);
return;
} else {
/* Zone is busy in another state; remember that so we
 * don't needlessly wake the worker below. */
blocked_bio = 1;
}
spin_unlock(&pkt->lock);
}
}
spin_unlock(&pd->cdrw.active_list_lock);
/*
 * Test if there is enough room left in the bio work queue
 * (queue size >= congestion on mark).
 * If not, wait till the work queue size is below the congestion off mark.
 * The lock is dropped around congestion_wait() so the worker can drain.
 */
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
spin_lock(&pd->lock);
} while(pd->bio_queue_size > pd->write_congestion_off);
}
spin_unlock(&pd->lock);
/*
 * No matching packet found. Store the bio in the work queue.
 */
node = mempool_alloc(pd->rb_pool, GFP_NOIO);
node->bio = bio;
spin_lock(&pd->lock);
BUG_ON(pd->bio_queue_size < 0);
was_empty = (pd->bio_queue_size == 0);
pkt_rbtree_insert(pd, node);
spin_unlock(&pd->lock);
/*
 * Wake up the worker thread.
 */
atomic_set(&pd->scan_queue, 1);
if (was_empty) {
/* This wake_up is required for correct operation */
wake_up(&pd->wqueue);
} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
/*
 * This wake up is not required for correct operation,
 * but improves performance in some cases.
 */
wake_up(&pd->wqueue);
}
return;
end_io:
bio_io_error(bio);
}
/*
 * pkt_merge_bvec - tell the block layer how many more bytes may be
 * merged into the bio described by @bmd.
 *
 * Normally this limits a bio to the remainder of its packet zone, so
 * most bios never cross a zone boundary.
 */
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct pktcdvd_device *pd = q->queuedata;
sector_t zone = ZONE(bmd->bi_sector, pd);
/* Bytes of this zone already consumed by the bio so far. */
int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
int remaining = (pd->settings.size << 9) - used;
int remaining2;
/*
 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
 * boundary, pkt_make_request() will split the bio.
 */
remaining2 = PAGE_SIZE - bmd->bi_size;
remaining = max(remaining, remaining2);
BUG_ON(remaining < 0);
return remaining;
}
/*
 * pkt_init_queue - configure the pktcdvd gendisk's request queue:
 * bio-based (no elevator), CD-frame-sized logical blocks, bounded
 * request size, and the merge hook that keeps bios inside one zone.
 */
static void pkt_init_queue(struct pktcdvd_device *pd)
{
struct request_queue *q = pd->disk->queue;
blk_queue_make_request(q, pkt_make_request);
blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
blk_queue_merge_bvec(q, pkt_merge_bvec);
q->queuedata = pd;
}
/*
 * pkt_seq_show - /proc/driver/pktcdvd/<name> contents: settings,
 * statistics, misc state and queue state for one mapped writer.
 * The text layout is user-visible ABI; do not reformat it.
 */
static int pkt_seq_show(struct seq_file *m, void *p)
{
struct pktcdvd_device *pd = m->private;
char *msg;
char bdev_buf[BDEVNAME_SIZE];
int states[PACKET_NUM_STATES];
seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
bdevname(pd->bdev, bdev_buf));
seq_printf(m, "\nSettings:\n");
/* settings.size is in 512-byte sectors; /2 converts to kB. */
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
if (pd->settings.write_type == 0)
msg = "Packet";
else
msg = "Unknown";
seq_printf(m, "\twrite type:\t\t%s\n", msg);
seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
msg = "Mode 1";
else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
msg = "Mode 2";
else
msg = "Unknown";
seq_printf(m, "\tblock mode:\t\t%s\n", msg);
seq_printf(m, "\nStatistics:\n");
seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
/* secs_* counters are 512-byte sectors; >> 1 converts to kB. */
seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
seq_printf(m, "\nMisc:\n");
seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
seq_printf(m, "\nQueue state:\n");
seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
pkt_count_states(pd, states);
seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
states[0], states[1], states[2], states[3], states[4], states[5]);
seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
pd->write_congestion_off,
pd->write_congestion_on);
return 0;
}
/*
 * pkt_seq_open - open the per-writer /proc entry; the pktcdvd_device
 * was stashed in the proc entry's data by proc_create_data().
 */
static int pkt_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, pkt_seq_show, PDE(inode)->data);
}
/* File operations for the per-writer /proc/driver/pktcdvd/<name> entry. */
static const struct file_operations pkt_proc_fops = {
.open = pkt_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release
};
/*
 * pkt_new_dev - attach a pktcdvd_device to the CD-ROM device @dev.
 *
 * Rejects recursive or duplicate mappings, opens the underlying block
 * device, configures the request queue, starts the kcdrwd worker
 * thread and registers the /proc entry.
 *
 * Returns 0 on success or a negative errno.
 */
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
int i;
int ret = 0;
char b[BDEVNAME_SIZE];
struct block_device *bdev;
if (pd->pkt_dev == dev) {
printk(DRIVER_NAME": Recursive setup not allowed\n");
return -EBUSY;
}
/* Refuse if @dev is already mapped or is itself a pktcdvd device. */
for (i = 0; i < MAX_WRITERS; i++) {
struct pktcdvd_device *pd2 = pkt_devs[i];
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
return -EBUSY;
}
}
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
/* NOTE(review): on failure blkdev_get() is expected to release the
 * bdget() reference itself - confirm against the blkdev_get contract. */
ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
if (ret)
return ret;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
pd->bdev = bdev;
set_blocksize(bdev, CD_FRAMESIZE);
pkt_init_queue(pd);
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
if (IS_ERR(pd->cdrw.thread)) {
printk(DRIVER_NAME": can't start kernel thread\n");
/* NOTE(review): PTR_ERR(pd->cdrw.thread) would preserve the real
 * error; -ENOMEM is a blanket substitute. */
ret = -ENOMEM;
goto out_mem;
}
proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
return 0;
out_mem:
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return ret;
}
/*
 * pkt_ioctl - ioctl handler for the pktcdvd block device.
 *
 * A small whitelist of CDROM/SCSI ioctls is forwarded to the driver of
 * the underlying device; everything else returns -ENOTTY.
 */
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
struct pktcdvd_device *pd = bdev->bd_disk->private_data;
int ret;
VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
mutex_lock(&pktcdvd_mutex);
switch (cmd) {
case CDROMEJECT:
/*
 * The door gets locked when the device is opened, so we
 * have to unlock it or else the eject command fails.
 * Only do this when we are the sole opener.
 */
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
/* fallthru */
/*
 * forward selected CDROM ioctls to CD-ROM, for UDF
 */
case CDROMMULTISESSION:
case CDROMREADTOCENTRY:
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
break;
default:
VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
ret = -ENOTTY;
}
mutex_unlock(&pktcdvd_mutex);
return ret;
}
/*
 * pkt_check_events - forward media-change event polling to the host
 * device this writer is mapped on.  Reports no events (0) whenever the
 * mapping is incomplete or the host driver has no check_events hook.
 */
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *host;

	if (!pd || !pd->bdev)
		return 0;

	host = pd->bdev->bd_disk;
	if (!host || !host->fops->check_events)
		return 0;

	return host->fops->check_events(host, clearing);
}
/* Block device operations for /dev/pktcdvd/* nodes. */
static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
.check_events = pkt_check_events,
};
/*
 * pktcdvd_devnode - place device nodes under /dev/pktcdvd/.
 * Returns a kmalloc'd name; presumably freed by the driver core caller.
 */
static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
/*
* Set up mapping from pktcdvd device to CD-ROM device.
*/
/*
 * pkt_setup_dev - allocate a writer slot, create the pktcdvd_device
 * with its mempool, locks, queues and gendisk, and attach it to @dev
 * via pkt_new_dev().  On success the new device number is returned
 * through @pkt_dev (if non-NULL).  On any failure the goto chain
 * unwinds exactly what was already allocated.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
int idx;
int ret = -ENOMEM;
struct pktcdvd_device *pd;
struct gendisk *disk;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
/* Find the first free writer slot. */
for (idx = 0; idx < MAX_WRITERS; idx++)
if (!pkt_devs[idx])
break;
if (idx == MAX_WRITERS) {
printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
ret = -EBUSY;
goto out_mutex;
}
pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
if (!pd)
goto out_mutex;
pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
sizeof(struct pkt_rb_node));
if (!pd->rb_pool)
goto out_mem;
INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
spin_lock_init(&pd->cdrw.active_list_lock);
spin_lock_init(&pd->lock);
spin_lock_init(&pd->iosched.lock);
bio_list_init(&pd->iosched.read_queue);
bio_list_init(&pd->iosched.write_queue);
sprintf(pd->name, DRIVER_NAME"%d", idx);
init_waitqueue_head(&pd->wqueue);
pd->bio_queue = RB_ROOT;
pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off;
disk = alloc_disk(1);
if (!disk)
goto out_mem;
pd->disk = disk;
disk->major = pktdev_major;
disk->first_minor = idx;
disk->fops = &pktcdvd_ops;
disk->flags = GENHD_FL_REMOVABLE;
strcpy(disk->disk_name, pd->name);
disk->devnode = pktcdvd_devnode;
disk->private_data = pd;
disk->queue = blk_alloc_queue(GFP_KERNEL);
if (!disk->queue)
goto out_mem2;
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
goto out_new_dev;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
disk->async_events = pd->bdev->bd_disk->async_events;
add_disk(disk);
pkt_sysfs_dev_new(pd);
pkt_debugfs_dev_new(pd);
/* Publish the device only after it is fully set up. */
pkt_devs[idx] = pd;
if (pkt_dev)
*pkt_dev = pd->pkt_dev;
mutex_unlock(&ctl_mutex);
return 0;
/* Error unwinding, in reverse order of allocation. */
out_new_dev:
blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
if (pd->rb_pool)
mempool_destroy(pd->rb_pool);
kfree(pd);
out_mutex:
mutex_unlock(&ctl_mutex);
printk(DRIVER_NAME": setup of pktcdvd device failed\n");
return ret;
}
/*
* Tear down mapping from pktcdvd device to CD-ROM device.
*/
/*
 * pkt_remove_dev - undo pkt_setup_dev()/pkt_new_dev() for the writer
 * identified by @pkt_dev.  Fails with -ENXIO if no such mapping exists
 * and -EBUSY while the device is still open.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
struct pktcdvd_device *pd;
int idx;
int ret = 0;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
for (idx = 0; idx < MAX_WRITERS; idx++) {
pd = pkt_devs[idx];
if (pd && (pd->pkt_dev == pkt_dev))
break;
}
if (idx == MAX_WRITERS) {
DPRINTK(DRIVER_NAME": dev not setup\n");
ret = -ENXIO;
goto out;
}
if (pd->refcnt > 0) {
ret = -EBUSY;
goto out;
}
/* The thread pointer may hold an error from a failed kthread_run(). */
if (!IS_ERR(pd->cdrw.thread))
kthread_stop(pd->cdrw.thread);
/* Unpublish first so no new lookups can find the device. */
pkt_devs[idx] = NULL;
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
del_gendisk(pd->disk);
blk_cleanup_queue(pd->disk->queue);
put_disk(pd->disk);
mempool_destroy(pd->rb_pool);
kfree(pd);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
out:
mutex_unlock(&ctl_mutex);
return ret;
}
/*
 * pkt_get_status - fill @ctrl_cmd with the mapping for the writer slot
 * given in dev_index: the encoded host and pktcdvd device numbers, or
 * zeroes if that slot is unused.  num_devices always reports the
 * compile-time writer limit.
 */
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (!pd) {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	} else {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
/*
 * pkt_ctl_ioctl - ioctl handler for the /dev/pktcdvd/control misc
 * device: set up, tear down or query pktcdvd mappings.  Setup and
 * teardown require CAP_SYS_ADMIN; status is unprivileged.  The
 * (possibly updated) command struct is always copied back to userspace.
 */
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct pkt_ctrl_command ctrl_cmd;
int ret = 0;
dev_t pkt_dev = 0;
if (cmd != PACKET_CTRL_CMD)
return -ENOTTY;
if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
return -EFAULT;
switch (ctrl_cmd.command) {
case PKT_CTRL_CMD_SETUP:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
/* Report the new device number back even on error (pkt_dev
 * stays 0 in that case). */
ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
break;
case PKT_CTRL_CMD_TEARDOWN:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
break;
case PKT_CTRL_CMD_STATUS:
pkt_get_status(&ctrl_cmd);
break;
default:
return -ENOTTY;
}
if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
return -EFAULT;
return ret;
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat entry point: converting the user pointer with
 * compat_ptr() suffices here - assumes struct pkt_ctrl_command has an
 * identical layout on 32- and 64-bit ABIs; TODO confirm.
 */
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
/* File operations for the pktcdvd control misc device (ioctl only). */
static const struct file_operations pkt_ctl_fops = {
.open = nonseekable_open,
.unlocked_ioctl = pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = pkt_ctl_compat_ioctl,
#endif
.owner = THIS_MODULE,
.llseek = no_llseek,
};
/* Misc device exposing /dev/pktcdvd/control with a dynamic minor. */
static struct miscdevice pkt_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = DRIVER_NAME,
.nodename = "pktcdvd/control",
.fops = &pkt_ctl_fops
};
/*
 * pkt_init - module init: create the read-clone mempool, register the
 * block major, sysfs/debugfs hooks, the control misc device and the
 * /proc directory.  The goto chain unwinds in reverse order on failure.
 */
static int __init pkt_init(void)
{
int ret;
mutex_init(&ctl_mutex);
psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
sizeof(struct packet_stacked_data));
if (!psd_pool)
return -ENOMEM;
ret = register_blkdev(pktdev_major, DRIVER_NAME);
if (ret < 0) {
printk(DRIVER_NAME": Unable to register block device\n");
goto out2;
}
/* With pktdev_major == 0, register_blkdev() picked a major for us. */
if (!pktdev_major)
pktdev_major = ret;
ret = pkt_sysfs_init();
if (ret)
goto out;
pkt_debugfs_init();
ret = misc_register(&pkt_misc);
if (ret) {
printk(DRIVER_NAME": Unable to register misc device\n");
goto out_misc;
}
/* Best-effort: a NULL pkt_proc is tolerated by the proc helpers. */
pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
return 0;
out_misc:
pkt_debugfs_cleanup();
pkt_sysfs_cleanup();
out:
unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
mempool_destroy(psd_pool);
return ret;
}
/*
 * pkt_exit - module exit: tear down everything pkt_init() registered,
 * in reverse order of creation.
 */
static void __exit pkt_exit(void)
{
remove_proc_entry("driver/"DRIVER_NAME, NULL);
misc_deregister(&pkt_misc);
pkt_debugfs_cleanup();
pkt_sysfs_cleanup();
unregister_blkdev(pktdev_major, DRIVER_NAME);
mempool_destroy(psd_pool);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");
module_init(pkt_init);
module_exit(pkt_exit);
| gpl-2.0 |
bju2000/lge-kernel-gproj | drivers/ps3/ps3-lpm.c | 8168 | 31964 | /*
* PS3 Logical Performance Monitor.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/ps3.h>
#include <asm/lv1call.h>
#include <asm/cell-pmu.h>
/* BOOKMARK tag macros */
#define PS3_PM_BOOKMARK_START 0x8000000000000000ULL
#define PS3_PM_BOOKMARK_STOP 0x4000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_KERNEL 0x1000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_USER 0x3000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_MASK_HI 0xF000000000000000ULL
#define PS3_PM_BOOKMARK_TAG_MASK_LO 0x0F00000000000000ULL
/* CBE PM CONTROL register macros */
#define PS3_PM_CONTROL_PPU_TH0_BOOKMARK 0x00001000
#define PS3_PM_CONTROL_PPU_TH1_BOOKMARK 0x00000800
#define PS3_PM_CONTROL_PPU_COUNT_MODE_MASK 0x000C0000
#define PS3_PM_CONTROL_PPU_COUNT_MODE_PROBLEM 0x00080000
#define PS3_WRITE_PM_MASK 0xFFFFFFFFFFFFFFFFULL
/* CBE PM START STOP register macros */
#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START 0x02000000
#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START 0x01000000
#define PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP 0x00020000
#define PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP 0x00010000
#define PS3_PM_START_STOP_START_MASK 0xFF000000
#define PS3_PM_START_STOP_STOP_MASK 0x00FF0000
/* CBE PM COUNTER register macros */
#define PS3_PM_COUNTER_MASK_HI 0xFFFFFFFF00000000ULL
#define PS3_PM_COUNTER_MASK_LO 0x00000000FFFFFFFFULL
/* BASE SIGNAL GROUP NUMBER macros */
#define PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER 0
#define PM_ISLAND2_SIGNAL_GROUP_NUMBER1 6
#define PM_ISLAND2_SIGNAL_GROUP_NUMBER2 7
#define PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER 7
#define PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER 15
#define PM_SPU_TRIGGER_SIGNAL_GROUP_NUMBER 17
#define PM_SPU_EVENT_SIGNAL_GROUP_NUMBER 18
#define PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER 18
#define PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER 24
#define PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER 49
#define PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER 52
#define PM_SIG_GROUP_SPU 41
#define PM_SIG_GROUP_SPU_TRIGGER 42
#define PM_SIG_GROUP_SPU_EVENT 43
#define PM_SIG_GROUP_MFC_MAX 60
/**
* struct ps3_lpm_shadow_regs - Performance monitor shadow registers.
*
* @pm_control: Shadow of the processor's pm_control register.
* @pm_start_stop: Shadow of the processor's pm_start_stop register.
* @group_control: Shadow of the processor's group_control register.
* @debug_bus_control: Shadow of the processor's debug_bus_control register.
*
* The logical performance monitor provides a write-only interface to
* these processor registers. These shadow variables cache the processor
* register values for reading.
*
* The initial value of the shadow registers at lpm creation is
* PS3_LPM_SHADOW_REG_INIT.
*/
struct ps3_lpm_shadow_regs {
u64 pm_control;
u64 pm_start_stop;
u64 group_control;
u64 debug_bus_control;
};
#define PS3_LPM_SHADOW_REG_INIT 0xFFFFFFFF00000000ULL
/**
* struct ps3_lpm_priv - Private lpm device data.
*
* @open: An atomic variable indicating the lpm driver has been opened.
* @rights: The lpm rights granted by the system policy module. A logical
* OR of enum ps3_lpm_rights.
* @node_id: The node id of a BE processor whose performance monitor this
* lpar has the right to use.
* @pu_id: The lv1 id of the logical PU.
* @lpm_id: The lv1 id of this lpm instance.
* @outlet_id: The outlet created by lv1 for this lpm instance.
* @tb_count: The number of bytes of data held in the lv1 trace buffer.
* @tb_cache: Kernel buffer to receive the data from the lv1 trace buffer.
* Must be 128 byte aligned.
* @tb_cache_size: Size of the kernel @tb_cache buffer. Must be 128 byte
* aligned.
* @tb_cache_internal: An unaligned buffer allocated by this driver to be
* used for the trace buffer cache when ps3_lpm_open() is called with a
* NULL tb_cache argument. Otherwise unused.
* @shadow: Processor register shadow of type struct ps3_lpm_shadow_regs.
* @sbd: The struct ps3_system_bus_device attached to this driver.
*
* The trace buffer is a buffer allocated and used internally to the lv1
* hypervisor to collect trace data. The trace buffer cache is a guest
* buffer that accepts the trace data from the trace buffer.
*/
struct ps3_lpm_priv {
atomic_t open;
u64 rights;
u64 node_id;
u64 pu_id;
u64 lpm_id;
u64 outlet_id;
u64 tb_count;
void *tb_cache;
u64 tb_cache_size;
void *tb_cache_internal;
struct ps3_lpm_shadow_regs shadow;
struct ps3_system_bus_device *sbd;
};
enum {
PS3_LPM_DEFAULT_TB_CACHE_SIZE = 0x4000,
};
/**
* lpm_priv - Static instance of the lpm data.
*
* Since the exported routines don't support the notion of a device
* instance we need to hold the instance in this static variable
* and then only allow at most one instance at a time to be created.
*/
static struct ps3_lpm_priv *lpm_priv;
/*
 * sbd_core - convenience accessor for the generic device embedded in
 * the system-bus device; used as the dev_* logging target.  BUGs if
 * the driver has not been probed yet.
 */
static struct device *sbd_core(void)
{
BUG_ON(!lpm_priv || !lpm_priv->sbd);
return &lpm_priv->sbd->core;
}
/**
* use_start_stop_bookmark - Enable the PPU bookmark trace.
*
* And it enables PPU bookmark triggers ONLY if the other triggers are not set.
* The start/stop bookmarks are inserted at ps3_enable_pm() and ps3_disable_pm()
* to start/stop LPM.
*
* Used to get good quality of the performance counter.
*/
enum {use_start_stop_bookmark = 1,};
/*
 * ps3_set_bookmark - write a raw 64-bit bookmark value into the PPU
 * bookmark SPR so it appears in the performance monitor trace.
 */
void ps3_set_bookmark(u64 bookmark)
{
/*
 * As per the PPE book IV, to avoid bookmark loss there must
 * not be a traced branch within 10 cycles of setting the
 * SPRN_BKMK register. The actual text is unclear if 'within'
 * includes cycles before the call.
 */
asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
mtspr(SPRN_BKMK, bookmark);
asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
}
EXPORT_SYMBOL_GPL(ps3_set_bookmark);
/*
 * ps3_set_pm_bookmark - compose and set a kernel-tagged bookmark.
 *
 * Layout as built here: low 32 bits hold the timebase snapshot, the
 * kernel tag bit goes in the top nibble, @tag (masked to the second
 * nibble), @incident at bit 48 and @th_id at bit 32 are OR'd in.
 * The exact field semantics are defined by the trace consumer -
 * presumably the PS3 performance analysis tools; confirm there.
 */
void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id)
{
u64 bookmark;
bookmark = (get_tb() & 0x00000000FFFFFFFFULL) |
PS3_PM_BOOKMARK_TAG_KERNEL;
bookmark = ((tag << 56) & PS3_PM_BOOKMARK_TAG_MASK_LO) |
(incident << 48) | (th_id << 32) | bookmark;
ps3_set_bookmark(bookmark);
}
EXPORT_SYMBOL_GPL(ps3_set_pm_bookmark);
/**
* ps3_read_phys_ctr - Read physical counter registers.
*
* Each physical counter can act as one 32 bit counter or as two 16 bit
* counters.
*/
u32 ps3_read_phys_ctr(u32 cpu, u32 phys_ctr)
{
int result;
u64 counter0415;
u64 counter2637;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return 0;
}
/* lv1 exposes the four 32-bit counters packed into two 64-bit
 * values; calling the "set" routine with zero value/mask arguments
 * performs a pure read-back. */
result = lv1_set_lpm_counter(lpm_priv->lpm_id, 0, 0, 0, 0, &counter0415,
&counter2637);
if (result) {
dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
"phys_ctr %u, %s\n", __func__, __LINE__, phys_ctr,
ps3_result(result));
return 0;
}
/* Counters 0/1 live in the high/low halves of counter0415,
 * counters 2/3 in counter2637. */
switch (phys_ctr) {
case 0:
return counter0415 >> 32;
case 1:
return counter0415 & PS3_PM_COUNTER_MASK_LO;
case 2:
return counter2637 >> 32;
case 3:
return counter2637 & PS3_PM_COUNTER_MASK_LO;
default:
/* Unreachable: range-checked above. */
BUG();
}
return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_phys_ctr);
/**
* ps3_write_phys_ctr - Write physical counter registers.
*
* Each physical counter can act as one 32 bit counter or as two 16 bit
* counters.
*/
void ps3_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
{
u64 counter0415;
u64 counter0415_mask;
u64 counter2637;
u64 counter2637_mask;
int result;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return;
}
/* Build the packed value/mask pair so that only the half of the
 * 64-bit word holding @phys_ctr is updated; a zero mask leaves the
 * other counters untouched. */
switch (phys_ctr) {
case 0:
counter0415 = (u64)val << 32;
counter0415_mask = PS3_PM_COUNTER_MASK_HI;
counter2637 = 0x0;
counter2637_mask = 0x0;
break;
case 1:
counter0415 = (u64)val;
counter0415_mask = PS3_PM_COUNTER_MASK_LO;
counter2637 = 0x0;
counter2637_mask = 0x0;
break;
case 2:
counter0415 = 0x0;
counter0415_mask = 0x0;
counter2637 = (u64)val << 32;
counter2637_mask = PS3_PM_COUNTER_MASK_HI;
break;
case 3:
counter0415 = 0x0;
counter0415_mask = 0x0;
counter2637 = (u64)val;
counter2637_mask = PS3_PM_COUNTER_MASK_LO;
break;
default:
/* Unreachable: range-checked above. */
BUG();
}
result = lv1_set_lpm_counter(lpm_priv->lpm_id,
counter0415, counter0415_mask,
counter2637, counter2637_mask,
&counter0415, &counter2637);
if (result)
dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter failed: "
"phys_ctr %u, val %u, %s\n", __func__, __LINE__,
phys_ctr, val, ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_phys_ctr);
/**
* ps3_read_ctr - Read counter.
*
* Read 16 or 32 bits depending on the current size of the counter.
* Counters 4, 5, 6 & 7 are always 16 bit.
*/
u32 ps3_read_ctr(u32 cpu, u32 ctr)
{
u32 val;
/* Logical counters fold onto NR_PHYS_CTRS physical ones. */
u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
val = ps3_read_phys_ctr(cpu, phys_ctr);
/* In 16-bit mode the low-numbered logical counter occupies the
 * upper half of the physical register, the high-numbered one the
 * lower half. */
if (ps3_get_ctr_size(cpu, phys_ctr) == 16)
val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
return val;
}
EXPORT_SYMBOL_GPL(ps3_read_ctr);
/**
* ps3_write_ctr - Write counter.
*
* Write 16 or 32 bits depending on the current size of the counter.
* Counters 4, 5, 6 & 7 are always 16 bit.
*/
void ps3_write_ctr(u32 cpu, u32 ctr, u32 val)
{
u32 phys_ctr;
u32 phys_val;
/* Logical counters fold onto NR_PHYS_CTRS physical ones. */
phys_ctr = ctr & (NR_PHYS_CTRS - 1);
if (ps3_get_ctr_size(cpu, phys_ctr) == 16) {
/* Read-modify-write: preserve the 16-bit half that belongs to
 * the other logical counter sharing this physical register. */
phys_val = ps3_read_phys_ctr(cpu, phys_ctr);
if (ctr < NR_PHYS_CTRS)
val = (val << 16) | (phys_val & 0xffff);
else
val = (val & 0xffff) | (phys_val & 0xffff0000);
}
ps3_write_phys_ctr(cpu, phys_ctr, val);
}
EXPORT_SYMBOL_GPL(ps3_write_ctr);
/**
* ps3_read_pm07_control - Read counter control registers.
*
* Each logical counter has a corresponding control register.
*/
u32 ps3_read_pm07_control(u32 cpu, u32 ctr)
{
/* No read-back path exists for these registers here (and no shadow
 * is kept - compare ps3_write_pm07_control()); always report zero. */
return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_pm07_control);
/**
* ps3_write_pm07_control - Write counter control registers.
*
* Each logical counter has a corresponding control register.
*/
void ps3_write_pm07_control(u32 cpu, u32 ctr, u32 val)
{
int result;
/* Full mask: replace the whole control register with @val. */
static const u64 mask = 0xFFFFFFFFFFFFFFFFULL;
u64 old_value;
if (ctr >= NR_CTRS) {
dev_dbg(sbd_core(), "%s:%u: ctr too big: %u\n", __func__,
__LINE__, ctr);
return;
}
/* The previous value returned by lv1 is discarded. */
result = lv1_set_lpm_counter_control(lpm_priv->lpm_id, ctr, val, mask,
&old_value);
if (result)
dev_err(sbd_core(), "%s:%u: lv1_set_lpm_counter_control "
"failed: ctr %u, %s\n", __func__, __LINE__, ctr,
ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_pm07_control);
/**
 * ps3_read_pm - Read Other LPM control registers.
 *
 * pm_control, pm_start_stop, group_control and debug_bus_control are
 * write-only through lv1, so their values come from the driver's shadow
 * copies.  pm_interval and pm_status are fetched from lv1 directly;
 * trace_address and ext_tr_timer have no readable lv1 backing and
 * return fixed values.  Fixes the "set_inteval" typo in the pm_interval
 * debug message.
 */
u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
{
	int result = 0;
	u64 val = 0;

	switch (reg) {
	case pm_control:
		return lpm_priv->shadow.pm_control;
	case trace_address:
		/* The lv1 trace buffer is drained elsewhere; report empty. */
		return CBE_PM_TRACE_BUF_EMPTY;
	case pm_start_stop:
		return lpm_priv->shadow.pm_start_stop;
	case pm_interval:
		/* A set call with zero value/mask is a pure read-back. */
		result = lv1_set_lpm_interval(lpm_priv->lpm_id, 0, 0, &val);
		if (result) {
			val = 0;
			dev_dbg(sbd_core(), "%s:%u: lv1 set_interval failed: "
				"reg %u, %s\n", __func__, __LINE__, reg,
				ps3_result(result));
		}
		return (u32)val;
	case group_control:
		return lpm_priv->shadow.group_control;
	case debug_bus_control:
		return lpm_priv->shadow.debug_bus_control;
	case pm_status:
		result = lv1_get_lpm_interrupt_status(lpm_priv->lpm_id,
						      &val);
		if (result) {
			val = 0;
			dev_dbg(sbd_core(), "%s:%u: lv1 get_lpm_status failed: "
				"reg %u, %s\n", __func__, __LINE__, reg,
				ps3_result(result));
		}
		return (u32)val;
	case ext_tr_timer:
		return 0;
	default:
		dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
			__LINE__, reg);
		BUG();
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ps3_read_pm);
/**
* ps3_write_pm - Write Other LPM control registers.
*/
void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
{
int result = 0;
u64 dummy;
switch (reg) {
case group_control:
/* Skip the hypercall when the value is unchanged; the shadow
 * is updated unconditionally so reads stay consistent. */
if (val != lpm_priv->shadow.group_control)
result = lv1_set_lpm_group_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
&dummy);
lpm_priv->shadow.group_control = val;
break;
case debug_bus_control:
if (val != lpm_priv->shadow.debug_bus_control)
result = lv1_set_lpm_debug_bus_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
&dummy);
lpm_priv->shadow.debug_bus_control = val;
break;
case pm_control:
/* Force the PPU bookmark trigger bits on so the start/stop
 * bookmarks inserted by enable/disable are honored. */
if (use_start_stop_bookmark)
val |= (PS3_PM_CONTROL_PPU_TH0_BOOKMARK |
PS3_PM_CONTROL_PPU_TH1_BOOKMARK);
if (val != lpm_priv->shadow.pm_control)
result = lv1_set_lpm_general_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
0, 0, &dummy,
&dummy);
lpm_priv->shadow.pm_control = val;
break;
case pm_interval:
/* No shadow for the interval register; always write. */
result = lv1_set_lpm_interval(lpm_priv->lpm_id, val,
PS3_WRITE_PM_MASK, &dummy);
break;
case pm_start_stop:
if (val != lpm_priv->shadow.pm_start_stop)
result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
val,
PS3_WRITE_PM_MASK,
&dummy);
lpm_priv->shadow.pm_start_stop = val;
break;
case trace_address:
case ext_tr_timer:
case pm_status:
/* Not writable through this interface; silently ignored. */
break;
default:
dev_dbg(sbd_core(), "%s:%u: unknown reg: %d\n", __func__,
__LINE__, reg);
BUG();
break;
}
if (result)
dev_err(sbd_core(), "%s:%u: lv1 set_control failed: "
"reg %u, %s\n", __func__, __LINE__, reg,
ps3_result(result));
}
EXPORT_SYMBOL_GPL(ps3_write_pm);
/**
* ps3_get_ctr_size - Get the size of a physical counter.
*
* Returns either 16 or 32.
*/
u32 ps3_get_ctr_size(u32 cpu, u32 phys_ctr)
{
u32 pm_ctrl;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return 0;
}
/* The per-counter size bit lives in the (shadowed) pm_control. */
pm_ctrl = ps3_read_pm(cpu, pm_control);
return (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
}
EXPORT_SYMBOL_GPL(ps3_get_ctr_size);
/**
* ps3_set_ctr_size - Set the size of a physical counter to 16 or 32 bits.
*/
void ps3_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
{
u32 pm_ctrl;
if (phys_ctr >= NR_PHYS_CTRS) {
dev_dbg(sbd_core(), "%s:%u: phys_ctr too big: %u\n", __func__,
__LINE__, phys_ctr);
return;
}
/* Read-modify-write of the size bit via the pm_control shadow. */
pm_ctrl = ps3_read_pm(cpu, pm_control);
switch (ctr_size) {
case 16:
pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
ps3_write_pm(cpu, pm_control, pm_ctrl);
break;
case 32:
pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
ps3_write_pm(cpu, pm_control, pm_ctrl);
break;
default:
/* Only 16 and 32 are valid counter sizes. */
BUG();
}
}
EXPORT_SYMBOL_GPL(ps3_set_ctr_size);
/*
 * Map an island-2 subgroup index to its lv1 signal group number.
 * Subgroup 2 aliases 3; 0..6 map onto the contiguous base range, and
 * the two remaining subgroups use dedicated group numbers.
 */
static u64 pm_translate_signal_group_number_on_island2(u64 subgroup)
{
	if (subgroup == 2)
		subgroup = 3;

	if (subgroup <= 6)
		return PM_ISLAND2_BASE_SIGNAL_GROUP_NUMBER + subgroup;
	if (subgroup == 7)
		return PM_ISLAND2_SIGNAL_GROUP_NUMBER1;
	return PM_ISLAND2_SIGNAL_GROUP_NUMBER2;
}
/*
 * Map an island-3 subgroup index to its lv1 signal group number.
 * Subgroups 2..4 are shifted up by two slots and subgroup 5 maps to 8;
 * all others translate directly off the island base.
 */
static u64 pm_translate_signal_group_number_on_island3(u64 subgroup)
{
	if (subgroup >= 2 && subgroup <= 4)
		subgroup += 2;
	else if (subgroup == 5)
		subgroup = 8;

	return PM_ISLAND3_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/* Island 4 subgroups map directly onto a contiguous lv1 group range. */
static u64 pm_translate_signal_group_number_on_island4(u64 subgroup)
{
return PM_ISLAND4_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/*
 * Map an island-5 subgroup index to its lv1 signal group number.
 * Subgroup 3 occupies slot 4 and subgroup 4 occupies slot 6; the rest
 * translate directly off the island base.
 */
static u64 pm_translate_signal_group_number_on_island5(u64 subgroup)
{
	if (subgroup == 3)
		subgroup = 4;
	else if (subgroup == 4)
		subgroup = 6;

	return PM_ISLAND5_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/*
 * Map an island-6 (subgroup, subsubgroup) pair to its lv1 signal group
 * number.  Both indices are first remapped to account for gaps in the
 * lv1 numbering; the subsubgroup only contributes for subgroup > 5.
 */
static u64 pm_translate_signal_group_number_on_island6(u64 subgroup,
u64 subsubgroup)
{
switch (subgroup) {
case 3:
case 4:
case 5:
subgroup += 1;
break;
default:
break;
}
/* Subsubgroups are remapped in three bands with growing offsets. */
switch (subsubgroup) {
case 4:
case 5:
case 6:
subsubgroup += 2;
break;
case 7:
case 8:
case 9:
case 10:
subsubgroup += 4;
break;
case 11:
case 12:
case 13:
subsubgroup += 5;
break;
default:
break;
}
if (subgroup <= 5)
return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup);
else
return (PM_ISLAND6_BASE_SIGNAL_GROUP_NUMBER + subgroup
+ subsubgroup - 1);
}
/* Island 7 subgroups map directly onto a contiguous lv1 group range. */
static u64 pm_translate_signal_group_number_on_island7(u64 subgroup)
{
return PM_ISLAND7_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/* Island 8 subgroups map directly onto a contiguous lv1 group range. */
static u64 pm_translate_signal_group_number_on_island8(u64 subgroup)
{
return PM_ISLAND8_BASE_SIGNAL_GROUP_NUMBER + subgroup;
}
/*
 * pm_signal_group_to_ps3_lv1_signal_group - convert a CBE-style signal
 * group number into the lv1 numbering.
 *
 * Group encodings decoded here: 20-89 select island N0..N9 directly;
 * 200-299 are an alternate island-2 range; 600-699 and 6000-6999 are
 * island-6 subsubgroup ranges (offset 650 / 6500).  Anything else
 * leaves island == 0 and hits the BUG() below.
 */
static u64 pm_signal_group_to_ps3_lv1_signal_group(u64 group)
{
u64 island;
u64 subgroup;
u64 subsubgroup;
subgroup = 0;
subsubgroup = 0;
island = 0;
if (group < 1000) {
if (group < 100) {
if (20 <= group && group < 30) {
island = 2;
subgroup = group - 20;
} else if (30 <= group && group < 40) {
island = 3;
subgroup = group - 30;
} else if (40 <= group && group < 50) {
island = 4;
subgroup = group - 40;
} else if (50 <= group && group < 60) {
island = 5;
subgroup = group - 50;
} else if (60 <= group && group < 70) {
island = 6;
subgroup = group - 60;
} else if (70 <= group && group < 80) {
island = 7;
subgroup = group - 70;
} else if (80 <= group && group < 90) {
island = 8;
subgroup = group - 80;
}
} else if (200 <= group && group < 300) {
island = 2;
subgroup = group - 200;
} else if (600 <= group && group < 700) {
island = 6;
subgroup = 5;
subsubgroup = group - 650;
}
} else if (6000 <= group && group < 7000) {
island = 6;
subgroup = 5;
subsubgroup = group - 6500;
}
/* Dispatch to the per-island translation of the decoded indices. */
switch (island) {
case 2:
return pm_translate_signal_group_number_on_island2(subgroup);
case 3:
return pm_translate_signal_group_number_on_island3(subgroup);
case 4:
return pm_translate_signal_group_number_on_island4(subgroup);
case 5:
return pm_translate_signal_group_number_on_island5(subgroup);
case 6:
return pm_translate_signal_group_number_on_island6(subgroup,
subsubgroup);
case 7:
return pm_translate_signal_group_number_on_island7(subgroup);
case 8:
return pm_translate_signal_group_number_on_island8(subgroup);
default:
dev_dbg(sbd_core(), "%s:%u: island not found: %llu\n", __func__,
__LINE__, group);
BUG();
break;
}
return 0;
}
/*
 * pm_bus_word_to_ps3_lv1_bus_word - Convert a debug bus word select
 * (1, 2, 4 or 8) into the corresponding lv1 nibble mask.  Word 1
 * selects the top nibble; word 8 - and any unrecognized value -
 * selects the bottom nibble.
 */
static u64 pm_bus_word_to_ps3_lv1_bus_word(u8 word)
{
	if (word == 1)
		return 0xF000;
	if (word == 2)
		return 0x0F00;
	if (word == 4)
		return 0x00F0;
	/* word == 8, and the fallback for anything else. */
	return 0x000F;
}
/*
 * __ps3_set_signal - Program one debug bus signal via the lv1 hypervisor.
 *
 * Thin wrapper around lv1_set_lpm_signal() that logs any failure with
 * all of its arguments.  Returns the raw lv1 result (0 on success).
 */
static int __ps3_set_signal(u64 lv1_signal_group, u64 bus_select,
	u64 signal_select, u64 attr1, u64 attr2, u64 attr3)
{
	int ret;

	ret = lv1_set_lpm_signal(lpm_priv->lpm_id, lv1_signal_group, bus_select,
		signal_select, attr1, attr2, attr3);
	if (ret)
		dev_err(sbd_core(),
			"%s:%u: error:%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
			__func__, __LINE__, ret, lv1_signal_group, bus_select,
			signal_select, attr1, attr2, attr3);

	return ret;
}
/*
 * ps3_set_signal - Select a performance monitor signal for counting.
 * @signal_group: raw signal group number; 0 resets all signal selections
 * @signal_bit: bit within the group; used by the SPU trigger/event groups
 * @sub_unit: target sub unit number, used for SPU/MFC signal groups
 * @bus_word: debug bus word select (1, 2, 4 or 8)
 *
 * Translates the generic arguments into lv1 terms and programs the
 * signal through __ps3_set_signal().  Returns 0 on success.
 */
int ps3_set_signal(u64 signal_group, u8 signal_bit, u16 sub_unit,
	u8 bus_word)
{
	int ret;
	u64 lv1_signal_group;
	u64 bus_select;
	u64 signal_select;
	u64 attr1, attr2, attr3;

	/* Group 0 clears any previously selected signals. */
	if (signal_group == 0)
		return __ps3_set_signal(0, 0, 0, 0, 0, 0);

	lv1_signal_group =
		pm_signal_group_to_ps3_lv1_signal_group(signal_group);
	bus_select = pm_bus_word_to_ps3_lv1_bus_word(bus_word);

	/* SPU trigger/event groups encode the bit position in a 64-bit mask. */
	switch (signal_group) {
	case PM_SIG_GROUP_SPU_TRIGGER:
		signal_select = 1;
		signal_select = signal_select << (63 - signal_bit);
		break;
	case PM_SIG_GROUP_SPU_EVENT:
		signal_select = 1;
		signal_select = (signal_select << (63 - signal_bit)) | 0x3;
		break;
	default:
		signal_select = 0;
		break;
	}

	/*
	 * 0: physical object.
	 * 1: logical object.
	 * This parameter is only used for the PPE and SPE signals.
	 */
	attr1 = 1;

	/*
	 * This parameter is used to specify the target physical/logical
	 * PPE/SPE object.
	 */
	if (PM_SIG_GROUP_SPU <= signal_group &&
		signal_group < PM_SIG_GROUP_MFC_MAX)
		attr2 = sub_unit;
	else
		attr2 = lpm_priv->pu_id;

	/*
	 * This parameter is only used for setting the SPE signal.
	 */
	attr3 = 0;

	ret = __ps3_set_signal(lv1_signal_group, bus_select, signal_select,
		attr1, attr2, attr3);
	if (ret)
		dev_err(sbd_core(), "%s:%u: __ps3_set_signal failed: %d\n",
			__func__, __LINE__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ps3_set_signal);
/*
 * ps3_get_hw_thread_id - Map a Linux cpu number to its hardware thread id.
 * Thin wrapper around get_hard_smp_processor_id().
 */
u32 ps3_get_hw_thread_id(int cpu)
{
	return get_hard_smp_processor_id(cpu);
}
EXPORT_SYMBOL_GPL(ps3_get_hw_thread_id);
/**
 * ps3_enable_pm - Enable the entire performance monitoring unit.
 * @cpu: unused in this implementation
 *
 * When we enable the LPM, all pending writes to counters get committed.
 */
void ps3_enable_pm(u32 cpu)
{
	int result;
	u64 tmp;
	int insert_bookmark = 0;

	lpm_priv->tb_count = 0;

	if (use_start_stop_bookmark) {
		/*
		 * Only arm the bookmark start/stop triggers when the shadow
		 * pm_start_stop register shows no start/stop condition is
		 * already configured.
		 */
		if (!(lpm_priv->shadow.pm_start_stop &
			(PS3_PM_START_STOP_START_MASK
			| PS3_PM_START_STOP_STOP_MASK))) {
			result = lv1_set_lpm_trigger_control(lpm_priv->lpm_id,
				(PS3_PM_START_STOP_PPU_TH0_BOOKMARK_START |
				PS3_PM_START_STOP_PPU_TH1_BOOKMARK_START |
				PS3_PM_START_STOP_PPU_TH0_BOOKMARK_STOP |
				PS3_PM_START_STOP_PPU_TH1_BOOKMARK_STOP),
				0xFFFFFFFFFFFFFFFFULL, &tmp);

			if (result)
				dev_err(sbd_core(), "%s:%u: "
					"lv1_set_lpm_trigger_control failed: "
					"%s\n", __func__, __LINE__,
					ps3_result(result));

			insert_bookmark = !result;
		}
	}

	result = lv1_start_lpm(lpm_priv->lpm_id);

	if (result)
		dev_err(sbd_core(), "%s:%u: lv1_start_lpm failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	/* Mark the start of the trace with a bookmark, if requested. */
	if (use_start_stop_bookmark && !result && insert_bookmark)
		ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_START);
}
EXPORT_SYMBOL_GPL(ps3_enable_pm);
/**
 * ps3_disable_pm - Disable the entire performance monitoring unit.
 * @cpu: unused in this implementation
 *
 * Inserts a stop bookmark, stops the LPM through the hypervisor and
 * records the trace buffer byte count in lpm_priv->tb_count.  An
 * LV1_WRONG_STATE result (the LPM was not running) is silently ignored.
 */
void ps3_disable_pm(u32 cpu)
{
	int result;
	u64 tmp;

	ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_STOP);

	result = lv1_stop_lpm(lpm_priv->lpm_id, &tmp);
	if (result) {
		/* Fixed checkpatch style violation: "if(" -> "if (". */
		if (result != LV1_WRONG_STATE)
			dev_err(sbd_core(), "%s:%u: lv1_stop_lpm failed: %s\n",
				__func__, __LINE__, ps3_result(result));
		return;
	}

	lpm_priv->tb_count = tmp;

	dev_dbg(sbd_core(), "%s:%u: tb_count %llu (%llxh)\n", __func__, __LINE__,
		lpm_priv->tb_count, lpm_priv->tb_count);
}
EXPORT_SYMBOL_GPL(ps3_disable_pm);
/**
 * ps3_lpm_copy_tb - Copy data from the trace buffer to a kernel buffer.
 * @offset: Offset in bytes from the start of the trace buffer.
 * @buf: Copy destination.
 * @count: Maximum count of bytes to copy.
 * @bytes_copied: Pointer to a variable that will receive the number of
 *  bytes copied to @buf.
 *
 * On error @buf will contain any successfully copied trace buffer data
 * and bytes_copied will be set to the number of bytes successfully copied.
 */
int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
	unsigned long *bytes_copied)
{
	int result;

	*bytes_copied = 0;

	/* A trace buffer cache must have been set up by ps3_lpm_open(). */
	if (!lpm_priv->tb_cache)
		return -EPERM;

	/* Reads starting past the captured data copy nothing. */
	if (offset >= lpm_priv->tb_count)
		return 0;

	/* Clamp to the data actually captured beyond @offset. */
	count = min_t(u64, count, lpm_priv->tb_count - offset);

	while (*bytes_copied < count) {
		const unsigned long request = count - *bytes_copied;
		u64 tmp;

		/*
		 * lv1 fills the registered tb_cache and reports the actual
		 * byte count in @tmp (may be less than @request).
		 */
		result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
			request, &tmp);
		if (result) {
			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
				__func__, __LINE__, request, offset);
			dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
				"failed: %s\n", __func__, __LINE__,
				ps3_result(result));
			return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
		}

		memcpy(buf, lpm_priv->tb_cache, tmp);
		buf += tmp;
		*bytes_copied += tmp;
		offset += tmp;
	}

	dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
		*bytes_copied);

	return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb);
/**
 * ps3_lpm_copy_tb_to_user - Copy data from the trace buffer to a user buffer.
 * @offset: Offset in bytes from the start of the trace buffer.
 * @buf: A __user copy destination.
 * @count: Maximum count of bytes to copy.
 * @bytes_copied: Pointer to a variable that will receive the number of
 *  bytes copied to @buf.
 *
 * On error @buf will contain any successfully copied trace buffer data
 * and bytes_copied will be set to the number of bytes successfully copied.
 */
int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
	unsigned long count, unsigned long *bytes_copied)
{
	int result;

	*bytes_copied = 0;

	/* A trace buffer cache must have been set up by ps3_lpm_open(). */
	if (!lpm_priv->tb_cache)
		return -EPERM;

	/* Reads starting past the captured data copy nothing. */
	if (offset >= lpm_priv->tb_count)
		return 0;

	/* Clamp to the data actually captured beyond @offset. */
	count = min_t(u64, count, lpm_priv->tb_count - offset);

	while (*bytes_copied < count) {
		const unsigned long request = count - *bytes_copied;
		u64 tmp;

		/*
		 * lv1 fills the registered tb_cache and reports the actual
		 * byte count in @tmp (may be less than @request).
		 */
		result = lv1_copy_lpm_trace_buffer(lpm_priv->lpm_id, offset,
			request, &tmp);
		if (result) {
			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%lx\n",
				__func__, __LINE__, request, offset);
			dev_err(sbd_core(), "%s:%u: lv1_copy_lpm_trace_buffer "
				"failed: %s\n", __func__, __LINE__,
				ps3_result(result));
			return result == LV1_WRONG_STATE ? -EBUSY : -EINVAL;
		}

		/* Relay the cached chunk to userspace. */
		result = copy_to_user(buf, lpm_priv->tb_cache, tmp);
		if (result) {
			dev_dbg(sbd_core(), "%s:%u: 0x%llx bytes at 0x%p\n",
				__func__, __LINE__, tmp, buf);
			dev_err(sbd_core(), "%s:%u: copy_to_user failed: %d\n",
				__func__, __LINE__, result);
			return -EFAULT;
		}

		buf += tmp;
		*bytes_copied += tmp;
		offset += tmp;
	}

	dev_dbg(sbd_core(), "%s:%u: copied %lxh bytes\n", __func__, __LINE__,
		*bytes_copied);

	return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_copy_tb_to_user);
/**
 * ps3_get_and_clear_pm_interrupts -
 *
 * Clearing interrupts for the entire performance monitoring unit.
 * Reading pm_status clears the interrupt bits.
 */
u32 ps3_get_and_clear_pm_interrupts(u32 cpu)
{
	/* The read itself has the side effect of clearing pending bits. */
	return ps3_read_pm(cpu, pm_status);
}
EXPORT_SYMBOL_GPL(ps3_get_and_clear_pm_interrupts);
/**
 * ps3_enable_pm_interrupts -
 *
 * Enabling interrupts for the entire performance monitoring unit.
 * Enables the interrupt bits in the pm_status register.
 */
void ps3_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
{
	/* A zero mask would enable nothing, so skip the register write. */
	if (mask)
		ps3_write_pm(cpu, pm_status, mask);
}
EXPORT_SYMBOL_GPL(ps3_enable_pm_interrupts);
/**
 * ps3_disable_pm_interrupts -
 *
 * Disabling interrupts for the entire performance monitoring unit.
 *
 * (Header previously misnamed this function "ps3_enable_pm_interrupts".)
 */
void ps3_disable_pm_interrupts(u32 cpu)
{
	/* Reading pm_status clears any pending interrupt bits... */
	ps3_get_and_clear_pm_interrupts(cpu);
	/* ...and writing 0 masks all further interrupts. */
	ps3_write_pm(cpu, pm_status, 0);
}
EXPORT_SYMBOL_GPL(ps3_disable_pm_interrupts);
/**
 * ps3_lpm_open - Open the logical performance monitor device.
 * @tb_type: Specifies the type of trace buffer lv1 should use for this lpm
 *  instance, specified by one of enum ps3_lpm_tb_type.
 * @tb_cache: Optional user supplied buffer to use as the trace buffer cache.
 *  If NULL, the driver will allocate and manage an internal buffer.
 *  Unused when @tb_type is PS3_LPM_TB_TYPE_NONE.
 * @tb_cache_size: The size in bytes of the user supplied @tb_cache buffer.
 *  Unused when @tb_cache is NULL or @tb_type is PS3_LPM_TB_TYPE_NONE.
 */
int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
	u64 tb_cache_size)
{
	int result;
	u64 tb_size;

	BUG_ON(!lpm_priv);
	BUG_ON(tb_type != PS3_LPM_TB_TYPE_NONE
		&& tb_type != PS3_LPM_TB_TYPE_INTERNAL);

	if (tb_type == PS3_LPM_TB_TYPE_NONE && tb_cache)
		dev_dbg(sbd_core(), "%s:%u: bad in vals\n", __func__, __LINE__);

	/* Allow only a single concurrent open of the device. */
	if (!atomic_add_unless(&lpm_priv->open, 1, 1)) {
		dev_dbg(sbd_core(), "%s:%u: busy\n", __func__, __LINE__);
		return -EBUSY;
	}

	/* Note tb_cache needs 128 byte alignment. */

	if (tb_type == PS3_LPM_TB_TYPE_NONE) {
		lpm_priv->tb_cache_size = 0;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = NULL;
	} else if (tb_cache) {
		/* A caller supplied cache must already be 128-byte aligned. */
		if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
			|| tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
			dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
				__func__, __LINE__);
			result = -EINVAL;
			goto fail_align;
		}
		lpm_priv->tb_cache_size = tb_cache_size;
		lpm_priv->tb_cache_internal = NULL;
		lpm_priv->tb_cache = tb_cache;
	} else {
		/* Over-allocate by 127 bytes so we can align internally. */
		lpm_priv->tb_cache_size = PS3_LPM_DEFAULT_TB_CACHE_SIZE;
		lpm_priv->tb_cache_internal = kzalloc(
			lpm_priv->tb_cache_size + 127, GFP_KERNEL);
		if (!lpm_priv->tb_cache_internal) {
			dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
				"failed\n", __func__, __LINE__);
			result = -ENOMEM;
			goto fail_malloc;
		}
		lpm_priv->tb_cache = (void *)_ALIGN_UP(
			(unsigned long)lpm_priv->tb_cache_internal, 128);
	}

	result = lv1_construct_lpm(lpm_priv->node_id, tb_type, 0, 0,
		ps3_mm_phys_to_lpar(__pa(lpm_priv->tb_cache)),
		lpm_priv->tb_cache_size, &lpm_priv->lpm_id,
		&lpm_priv->outlet_id, &tb_size);

	if (result) {
		dev_err(sbd_core(), "%s:%u: lv1_construct_lpm failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		result = -EINVAL;
		goto fail_construct;
	}

	/* Mark the shadow registers as not yet holding hardware state. */
	lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
	lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;

	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
		"tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
		lpm_priv->outlet_id, tb_size);

	return 0;

fail_construct:
	kfree(lpm_priv->tb_cache_internal);
	lpm_priv->tb_cache_internal = NULL;
fail_malloc:
fail_align:
	atomic_dec(&lpm_priv->open);
	return result;
}
EXPORT_SYMBOL_GPL(ps3_lpm_open);
/**
 * ps3_lpm_close - Close the lpm device.
 *
 * Destructs the lv1 LPM instance, frees any internally allocated trace
 * buffer cache and drops the open count taken in ps3_lpm_open().
 */
int ps3_lpm_close(void)
{
	dev_dbg(sbd_core(), "%s:%u\n", __func__, __LINE__);

	lv1_destruct_lpm(lpm_priv->lpm_id);
	lpm_priv->lpm_id = 0;

	/* kfree(NULL) is a no-op, so this is safe for a caller-owned cache. */
	kfree(lpm_priv->tb_cache_internal);
	lpm_priv->tb_cache_internal = NULL;

	atomic_dec(&lpm_priv->open);
	return 0;
}
EXPORT_SYMBOL_GPL(ps3_lpm_close);
/*
 * ps3_lpm_probe - Bind the driver to the lpm system bus device.
 *
 * Allocates the global lpm_priv instance and caches the device's lpm
 * properties.  Refuses a second probe while lpm_priv is already set.
 */
static int __devinit ps3_lpm_probe(struct ps3_system_bus_device *dev)
{
	dev_dbg(&dev->core, " -> %s:%u\n", __func__, __LINE__);

	if (lpm_priv) {
		dev_info(&dev->core, "%s:%u: called twice\n",
			__func__, __LINE__);
		return -EBUSY;
	}

	lpm_priv = kzalloc(sizeof(*lpm_priv), GFP_KERNEL);
	if (!lpm_priv)
		return -ENOMEM;

	lpm_priv->sbd = dev;
	lpm_priv->node_id = dev->lpm.node_id;
	lpm_priv->pu_id = dev->lpm.pu_id;
	lpm_priv->rights = dev->lpm.rights;

	dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);

	return 0;
}
/*
 * ps3_lpm_remove - Unbind the driver; also used as the shutdown handler.
 *
 * Closes the lpm device and releases the global lpm_priv instance.
 */
static int ps3_lpm_remove(struct ps3_system_bus_device *dev)
{
	dev_dbg(&dev->core, " -> %s:%u:\n", __func__, __LINE__);

	ps3_lpm_close();

	kfree(lpm_priv);
	lpm_priv = NULL;

	dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
	return 0;
}
/* System bus glue; .shutdown shares the .remove teardown path. */
static struct ps3_system_bus_driver ps3_lpm_driver = {
	.match_id = PS3_MATCH_ID_LPM,
	.core.name = "ps3-lpm",
	.core.owner = THIS_MODULE,
	.probe = ps3_lpm_probe,
	.remove = ps3_lpm_remove,
	.shutdown = ps3_lpm_remove,
};
/* Module entry point: register the driver with the ps3 system bus. */
static int __init ps3_lpm_init(void)
{
	pr_debug("%s:%d:\n", __func__, __LINE__);
	return ps3_system_bus_driver_register(&ps3_lpm_driver);
}
/* Module exit point: unregister the driver from the ps3 system bus. */
static void __exit ps3_lpm_exit(void)
{
	pr_debug("%s:%d:\n", __func__, __LINE__);
	ps3_system_bus_driver_unregister(&ps3_lpm_driver);
}

module_init(ps3_lpm_init);
module_exit(ps3_lpm_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PS3 Logical Performance Monitor Driver");
MODULE_AUTHOR("Sony Corporation");
MODULE_ALIAS(PS3_MODULE_ALIAS_LPM);
| gpl-2.0 |
YenneferProject/Kernel | security/min_addr.c | 13544 | 1345 | #include <linux/init.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/sysctl.h>
/* amount of vm to protect from userspace access by both DAC and the LSM*/
unsigned long mmap_min_addr;
/* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */
unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
/* amount of vm to protect from userspace using the LSM = CONFIG_LSM_MMAP_MIN_ADDR */
/*
 * Update mmap_min_addr = max(dac_mmap_min_addr, CONFIG_LSM_MMAP_MIN_ADDR)
 */
static void update_mmap_min_addr(void)
{
#ifdef CONFIG_LSM_MMAP_MIN_ADDR
	mmap_min_addr = (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR) ?
			dac_mmap_min_addr : CONFIG_LSM_MMAP_MIN_ADDR;
#else
	mmap_min_addr = dac_mmap_min_addr;
#endif
}
/*
 * sysctl handler which just sets dac_mmap_min_addr = the new value and then
 * calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly
 */
int mmap_min_addr_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	/* Only CAP_SYS_RAWIO tasks may change the limit. */
	if (write && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	/* Recompute the effective mmap_min_addr unconditionally. */
	update_mmap_min_addr();

	return ret;
}
/* Establish the initial mmap_min_addr from the compile-time defaults. */
static int __init init_mmap_min_addr(void)
{
	update_mmap_min_addr();
	return 0;
}
pure_initcall(init_mmap_min_addr);
| gpl-2.0 |
nutsboard/linux-am335x | fs/ntfs/index.c | 13544 | 15191 | /*
* index.c - NTFS kernel index handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2004-2005 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include "aops.h"
#include "collate.h"
#include "debug.h"
#include "index.h"
#include "ntfs.h"
/**
 * ntfs_index_ctx_get - allocate and initialize a new index context
 * @idx_ni:	ntfs index inode with which to initialize the context
 *
 * Allocate a new index context from the dedicated slab cache, initialize
 * it so it refers to the index inode @idx_ni, and return it.  Return NULL
 * if the allocation failed.
 *
 * Locking: Caller must hold i_mutex on the index inode.
 */
ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
{
	ntfs_index_context *ictx;

	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
	if (!ictx)
		return NULL;
	*ictx = (ntfs_index_context){ .idx_ni = idx_ni };
	return ictx;
}
/**
 * ntfs_index_ctx_put - release an index context
 * @ictx:	index context to free
 *
 * Release every resource attached to the index context @ictx - the
 * attribute search context and mapped mft record when the entry lives in
 * the index root, or the locked index page otherwise - then return @ictx
 * to its slab cache.
 *
 * Locking: Caller must hold i_mutex on the index inode.
 */
void ntfs_index_ctx_put(ntfs_index_context *ictx)
{
	if (ictx->entry) {
		if (ictx->is_in_root) {
			if (ictx->actx)
				ntfs_attr_put_search_ctx(ictx->actx);
			if (ictx->base_ni)
				unmap_mft_record(ictx->base_ni);
		} else if (ictx->page) {
			BUG_ON(!PageLocked(ictx->page));
			unlock_page(ictx->page);
			ntfs_unmap_page(ictx->page);
		}
	}
	kmem_cache_free(ntfs_index_ctx_cache, ictx);
}
/**
 * ntfs_index_lookup - find a key in an index and return its index entry
 * @key:	[IN] key for which to search in the index
 * @key_len:	[IN] length of @key in bytes
 * @ictx:	[IN/OUT] context describing the index and the returned entry
 *
 * Before calling ntfs_index_lookup(), @ictx must have been obtained from a
 * call to ntfs_index_ctx_get().
 *
 * Look for the @key in the index specified by the index lookup context @ictx.
 * ntfs_index_lookup() walks the contents of the index looking for the @key.
 *
 * If the @key is found in the index, 0 is returned and @ictx is setup to
 * describe the index entry containing the matching @key.  @ictx->entry is the
 * index entry and @ictx->data and @ictx->data_len are the index entry data and
 * its length in bytes, respectively.
 *
 * If the @key is not found in the index, -ENOENT is returned and @ictx is
 * setup to describe the index entry whose key collates immediately after the
 * search @key, i.e. this is the position in the index at which an index entry
 * with a key of @key would need to be inserted.
 *
 * If an error occurs return the negative error code and @ictx is left
 * untouched.
 *
 * When finished with the entry and its data, call ntfs_index_ctx_put() to free
 * the context and other associated resources.
 *
 * If the index entry was modified, call flush_dcache_index_entry_page()
 * immediately after the modification and either ntfs_index_entry_mark_dirty()
 * or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
 * ensure that the changes are written to disk.
 *
 * Locking:	- Caller must hold i_mutex on the index inode.
 *		- Each page cache page in the index allocation mapping must be
 *		  locked whilst being accessed otherwise we may find a corrupt
 *		  page due to it being under ->writepage at the moment which
 *		  applies the mst protection fixups before writing out and then
 *		  removes them again after the write is complete after which it
 *		  unlocks the page.
 */
int ntfs_index_lookup(const void *key, const int key_len,
		ntfs_index_context *ictx)
{
	VCN vcn, old_vcn;
	ntfs_inode *idx_ni = ictx->idx_ni;
	ntfs_volume *vol = idx_ni->vol;
	struct super_block *sb = vol->sb;
	ntfs_inode *base_ni = idx_ni->ext.base_ntfs_ino;
	MFT_RECORD *m;
	INDEX_ROOT *ir;
	INDEX_ENTRY *ie;
	INDEX_ALLOCATION *ia;
	u8 *index_end, *kaddr;
	ntfs_attr_search_ctx *actx;
	struct address_space *ia_mapping;
	struct page *page;
	int rc, err = 0;

	ntfs_debug("Entering.");
	BUG_ON(!NInoAttr(idx_ni));
	BUG_ON(idx_ni->type != AT_INDEX_ALLOCATION);
	BUG_ON(idx_ni->nr_extents != -1);
	BUG_ON(!base_ni);
	BUG_ON(!key);
	BUG_ON(key_len <= 0);
	if (!ntfs_is_collation_rule_supported(
			idx_ni->itype.index.collation_rule)) {
		ntfs_error(sb, "Index uses unsupported collation rule 0x%x.  "
				"Aborting lookup.", le32_to_cpu(
				idx_ni->itype.index.collation_rule));
		return -EOPNOTSUPP;
	}
	/* Get hold of the mft record for the index inode. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
				-PTR_ERR(m));
		return PTR_ERR(m);
	}
	actx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!actx)) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Find the index root attribute in the mft record. */
	err = ntfs_attr_lookup(AT_INDEX_ROOT, idx_ni->name, idx_ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, actx);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			ntfs_error(sb, "Index root attribute missing in inode "
					"0x%lx.", idx_ni->mft_no);
			err = -EIO;
		}
		goto err_out;
	}
	/* Get to the index root value (it has been verified in read_inode). */
	ir = (INDEX_ROOT*)((u8*)actx->attr +
			le16_to_cpu(actx->attr->data.resident.value_offset));
	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ir->index +
			le32_to_cpu(ir->index.entries_offset));
	/*
	 * Loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)actx->mrec || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->length) > index_end)
			goto idx_err_out;
		/*
		 * The last entry cannot contain a key.  It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Further bounds checks. */
		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
				le16_to_cpu(ie->key_length) >
				le16_to_cpu(ie->data.vi.data_offset) ||
				(u32)le16_to_cpu(ie->data.vi.data_offset) +
				le16_to_cpu(ie->data.vi.data_length) >
				le16_to_cpu(ie->length))
			goto idx_err_out;
		/* If the keys match perfectly, we setup @ictx and return 0. */
		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
				&ie->key, key_len)) {
ir_done:
			ictx->is_in_root = true;
			ictx->ir = ir;
			ictx->actx = actx;
			ictx->base_ni = base_ni;
			ictx->ia = NULL;
			ictx->page = NULL;
done:
			ictx->entry = ie;
			ictx->data = (u8*)ie +
					le16_to_cpu(ie->data.vi.data_offset);
			ictx->data_len = le16_to_cpu(ie->data.vi.data_length);
			ntfs_debug("Done.");
			return err;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
				key_len, &ie->key, le16_to_cpu(ie->key_length));
		/*
		 * If @key collates before the key of the current entry, there
		 * is definitely no such key in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/*
		 * A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
		 */
		if (!rc)
			goto ir_done;
		/* The keys are not equal, continue the search. */
	}
	/*
	 * We have finished with this index without success.  Check for the
	 * presence of a child node and if not present setup @ictx and return
	 * -ENOENT.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto ir_done;
	} /* Child node present, descend into it. */
	/* Consistency check: Verify that an index allocation exists. */
	if (!NInoIndexAllocPresent(idx_ni)) {
		ntfs_error(sb, "No index allocation attribute but index entry "
				"requires one.  Inode 0x%lx is corrupt or "
				"driver bug.", idx_ni->mft_no);
		goto err_out;
	}
	/* Get the starting vcn of the index_block holding the child node. */
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	ia_mapping = VFS_I(idx_ni)->i_mapping;
	/*
	 * We are done with the index root and the mft record.  Release them,
	 * otherwise we deadlock with ntfs_map_page().
	 */
	ntfs_attr_put_search_ctx(actx);
	unmap_mft_record(base_ni);
	m = NULL;
	actx = NULL;
descend_into_child_node:
	/*
	 * Convert vcn to index into the index allocation attribute in units
	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
	 * disk if necessary.
	 */
	page = ntfs_map_page(ia_mapping, vcn <<
			idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page)) {
		ntfs_error(sb, "Failed to map index page, error %ld.",
				-PTR_ERR(page));
		err = PTR_ERR(page);
		goto err_out;
	}
	lock_page(page);
	kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
	/* Get to the index allocation block. */
	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
			idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
	/* Bounds checks. */
	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Out of bounds check failed.  Corrupt inode "
				"0x%lx or driver bug.", idx_ni->mft_no);
		goto unm_err_out;
	}
	/* Catch multi sector transfer fixup errors. */
	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
		ntfs_error(sb, "Index record with vcn 0x%llx is corrupt.  "
				"Corrupt inode 0x%lx.  Run chkdsk.",
				(long long)vcn, idx_ni->mft_no);
		goto unm_err_out;
	}
	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
				"different from expected VCN (0x%llx).  Inode "
				"0x%lx is corrupt or driver bug.",
				(unsigned long long)
				sle64_to_cpu(ia->index_block_vcn),
				(unsigned long long)vcn, idx_ni->mft_no);
		goto unm_err_out;
	}
	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
			idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx has "
				"a size (%u) differing from the index "
				"specified size (%u).  Inode is corrupt or "
				"driver bug.", (unsigned long long)vcn,
				idx_ni->mft_no,
				le32_to_cpu(ia->index.allocated_size) + 0x18,
				idx_ni->itype.index.block_size);
		goto unm_err_out;
	}
	index_end = (u8*)ia + idx_ni->itype.index.block_size;
	if (index_end > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
				"crosses page boundary.  Impossible!  Cannot "
				"access!  This is probably a bug in the "
				"driver.", (unsigned long long)vcn,
				idx_ni->mft_no);
		goto unm_err_out;
	}
	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
	if (index_end > (u8*)ia + idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of inode "
				"0x%lx exceeds maximum size.",
				(unsigned long long)vcn, idx_ni->mft_no);
		goto unm_err_out;
	}
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ia->index +
			le32_to_cpu(ia->index.entries_offset));
	/*
	 * Iterate similar to above big loop but applied to index buffer, thus
	 * loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)ia || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->length) > index_end) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			goto unm_err_out;
		}
		/*
		 * The last entry cannot contain a key.  It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Further bounds checks. */
		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
				le16_to_cpu(ie->key_length) >
				le16_to_cpu(ie->data.vi.data_offset) ||
				(u32)le16_to_cpu(ie->data.vi.data_offset) +
				le16_to_cpu(ie->data.vi.data_length) >
				le16_to_cpu(ie->length)) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			goto unm_err_out;
		}
		/* If the keys match perfectly, we setup @ictx and return 0. */
		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
				&ie->key, key_len)) {
ia_done:
			ictx->is_in_root = false;
			ictx->actx = NULL;
			ictx->base_ni = NULL;
			ictx->ia = ia;
			ictx->page = page;
			goto done;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
				key_len, &ie->key, le16_to_cpu(ie->key_length));
		/*
		 * If @key collates before the key of the current entry, there
		 * is definitely no such key in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/*
		 * A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
		 */
		if (!rc)
			goto ia_done;
		/* The keys are not equal, continue the search. */
	}
	/*
	 * We have finished with this index buffer without success.  Check for
	 * the presence of a child node and if not present return -ENOENT.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto ia_done;
	}
	if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
		ntfs_error(sb, "Index entry with child node found in a leaf "
				"node in inode 0x%lx.", idx_ni->mft_no);
		goto unm_err_out;
	}
	/* Child node present, descend into it. */
	old_vcn = vcn;
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	if (vcn >= 0) {
		/*
		 * If vcn is in the same page cache page as old_vcn we recycle
		 * the mapped page.
		 */
		if (old_vcn << vol->cluster_size_bits >>
				PAGE_CACHE_SHIFT == vcn <<
				vol->cluster_size_bits >>
				PAGE_CACHE_SHIFT)
			goto fast_descend_into_child_node;
		unlock_page(page);
		ntfs_unmap_page(page);
		goto descend_into_child_node;
	}
	ntfs_error(sb, "Negative child node vcn in inode 0x%lx.",
			idx_ni->mft_no);
unm_err_out:
	unlock_page(page);
	ntfs_unmap_page(page);
err_out:
	/* Any fall-through corruption path maps to -EIO here. */
	if (!err)
		err = -EIO;
	if (actx)
		ntfs_attr_put_search_ctx(actx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
idx_err_out:
	ntfs_error(sb, "Corrupt index.  Aborting lookup.");
	goto err_out;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.